From 52294a56c06d9965cbcf57cbaa8917106e43a08f Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Wed, 13 Dec 2023 03:53:46 +0530 Subject: [PATCH 01/13] feat: prepared addon for prometheus cloudwatch exporter --- .github/workflows/readme.yml | 4 +- ...etheus-cloudwatch-exporter-controller.yaml | 51 ++++ .../secret.yaml | 10 + _examples/complete/main.tf | 152 +++++------ _examples/complete/variables.tf | 6 + addons/fluent-bit/README.md | 2 +- addons/ingress-nginx/README.md | 4 +- addons/istio-ingress/README.md | 2 +- addons/karpenter/README.md | 2 +- addons/keda/README.md | 2 +- addons/kiali-server/README.md | 2 +- .../prometheus-cloudwatch-exporter/README.md | 98 +++++++ .../prometheus-cloudwatch-exporter.yaml | 245 ++++++++++++++++++ .../prometheus-cloudwatch-exporter/locals.tf | 44 ++++ addons/prometheus-cloudwatch-exporter/main.tf | 131 ++++++++++ .../prometheus-cloudwatch-exporter/outputs.tf | 11 + .../variables.tf | 49 ++++ .../versions.tf | 22 ++ addons/reloader/README.md | 2 +- main.tf | 12 + override_values.tf | 27 ++ variables.tf | 31 +++ 22 files changed, 823 insertions(+), 86 deletions(-) create mode 100644 _examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml create mode 100644 _examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml create mode 100644 addons/prometheus-cloudwatch-exporter/README.md create mode 100644 addons/prometheus-cloudwatch-exporter/config/prometheus-cloudwatch-exporter.yaml create mode 100644 addons/prometheus-cloudwatch-exporter/locals.tf create mode 100644 addons/prometheus-cloudwatch-exporter/main.tf create mode 100644 addons/prometheus-cloudwatch-exporter/outputs.tf create mode 100644 addons/prometheus-cloudwatch-exporter/variables.tf create mode 100644 addons/prometheus-cloudwatch-exporter/versions.tf diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml index d666796..1eb1d24 100644 --- a/.github/workflows/readme.yml +++ 
b/.github/workflows/readme.yml @@ -5,8 +5,6 @@ on: push: branches: - master - paths: - - '_examples/**' workflow_dispatch: jobs: @@ -90,7 +88,7 @@ jobs: - name: Generate TF Docs uses: terraform-docs/gh-actions@v1.0.0 with: - working-dir: addons/aws-ebs-csi-driver,addons/aws-efs-csi-driver,addons/aws-load-balancer-controller,addons/aws-node-termination-handler,addons/calico-tigera,addons/cluster-autoscaler,addons/external-secrets,addons/fluent-bit,addons/helm,addons/ingress-nginx,addons/istio-ingress,addons/karpenter,addons/kiali-server,addons/kubeclarity,addons/metrics-server,addons/nri-bundle,addons/velero,addons/kube-state-metrics,addons/keda,addons/cert-manager,addons/filebeat,addons/reloader,addons/external-dns,addons/redis,addons/actions-runner-controller + working-dir: addons/aws-ebs-csi-driver,addons/aws-efs-csi-driver,addons/aws-load-balancer-controller,addons/aws-node-termination-handler,addons/calico-tigera,addons/cluster-autoscaler,addons/external-secrets,addons/fluent-bit,addons/helm,addons/ingress-nginx,addons/istio-ingress,addons/karpenter,addons/kiali-server,addons/kubeclarity,addons/metrics-server,addons/nri-bundle,addons/velero,addons/kube-state-metrics,addons/keda,addons/cert-manager,addons/filebeat,addons/reloader,addons/external-dns,addons/redis,addons/actions-runner-controller,addons/prometheus-cloudwatch-exporter git-push: true template: |- diff --git a/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml b/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml new file mode 100644 index 0000000..1b0d32a --- /dev/null +++ b/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml @@ -0,0 +1,51 @@ +## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + 
nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/nodegroup" + operator: In + values: + - "critical" + +## Using limits and requests +resources: + limits: + cpu: 300m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi + +# Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here +config: |- + # This is the default configuration for prometheus-cloudwatch-exporter + region: eu-west-1 + period_seconds: 240 + metrics: + - aws_namespace: AWS/ELB + aws_metric_name: HealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: UnHealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: RequestCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Sum] + + - aws_namespace: AWS/ELB + aws_metric_name: Latency + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: SurgeQueueLength + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Maximum, Sum] diff --git a/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml b/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml new file mode 100644 index 0000000..13df3ad --- /dev/null +++ b/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: aws + namespace: monitoring # Namespace of Prometheus Cloudwatch Exporter addon destination +type: Opaque +data: + access_key: QUtJQUlPU0ZPRE5ON0VYQU1QTEU= # Placeholder (AWS docs example key ID) - Replace with your AWS Access Key ID encoded with base64 (use `echo -n` so no trailing newline is encoded) + secret_key: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQ== # Placeholder (AWS docs example secret) - Replace with your AWS Secret Access Key encoded with base64 (use `echo -n` so no trailing newline is encoded) + # Refer
https://www.baeldung.com/linux/cli-base64-encode-decode this URL to Encode and Decode of String in Base64 \ No newline at end of file diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index cfd6af6..b423a2a 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -152,88 +152,90 @@ module "addons" { eks_cluster_name = module.eks.cluster_name # -- Enable Addons - metrics_server = true - cluster_autoscaler = true - aws_load_balancer_controller = true - aws_node_termination_handler = true - aws_efs_csi_driver = true - aws_ebs_csi_driver = true - kube_state_metrics = true - karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. - calico_tigera = true - new_relic = true - kubeclarity = true - ingress_nginx = true - fluent_bit = true - velero = true - keda = true - certification_manager = true - filebeat = true - reloader = true - external_dns = true - redis = true - actions_runner_controller = true - - + metrics_server = false + cluster_autoscaler = false + aws_load_balancer_controller = false + aws_node_termination_handler = true + aws_efs_csi_driver = false + aws_ebs_csi_driver = false + kube_state_metrics = false + karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. 
+ calico_tigera = false + new_relic = false + kubeclarity = false + ingress_nginx = false + fluent_bit = false + velero = false + keda = false + certification_manager = false + filebeat = false + reloader = false + external_dns = false + redis = false + actions_runner_controller = false + prometheus_cloudwatch_exporter = true # -- Addons with mandatory variable - istio_ingress = true + istio_ingress = false istio_manifests = var.istio_manifests - kiali_server = true + kiali_server = false kiali_manifests = var.kiali_manifests - external_secrets = true + external_secrets = false # -- Path of override-values.yaml file - metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } - cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } - karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } - aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } - aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } - aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } - aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } - calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } - istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } - kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } - external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } - ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } - kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } - fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } - velero_helm_config = { values = 
[file("./config/override-velero.yaml")] } - new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } - kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } - keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } - certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } - filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } - reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } - external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } - redis_helm_config = { values = [file("./config/override-redis.yaml")] } - actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } + metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } + cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } + karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } + aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } + aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } + aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } + aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } + calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } + istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } + kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } + external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } + ingress_nginx_helm_config = { values = 
[file("./config/override-ingress-nginx.yaml")] } + kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } + fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } + velero_helm_config = { values = [file("./config/override-velero.yaml")] } + new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } + kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } + keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } + certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } + filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } + reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } + external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } + redis_helm_config = { values = [file("./config/override-redis.yaml")] } + actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } + prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } + prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] # -- Override Helm Release attributes - metrics_server_extra_configs = var.metrics_server_extra_configs - cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs - karpenter_extra_configs = var.karpenter_extra_configs - aws_load_balancer_controller_extra_configs = var.aws_load_balancer_controller_extra_configs - aws_node_termination_handler_extra_configs = var.aws_node_termination_handler_extra_configs - aws_efs_csi_driver_extra_configs = var.aws_efs_csi_driver_extra_configs - aws_ebs_csi_driver_extra_configs = var.aws_ebs_csi_driver_extra_configs - calico_tigera_extra_configs = 
var.calico_tigera_extra_configs - istio_ingress_extra_configs = var.istio_ingress_extra_configs - kiali_server_extra_configs = var.kiali_server_extra_configs - ingress_nginx_extra_configs = var.ingress_nginx_extra_configs - kubeclarity_extra_configs = var.kubeclarity_extra_configs - fluent_bit_extra_configs = var.fluent_bit_extra_configs - velero_extra_configs = var.velero_extra_configs - new_relic_extra_configs = var.new_relic_extra_configs - kube_state_metrics_extra_configs = var.kube_state_metrics_extra_configs - keda_extra_configs = var.keda_extra_configs - certification_manager_extra_configs = var.certification_manager_extra_configs - external_secrets_extra_configs = var.external_secrets_extra_configs - filebeat_extra_configs = var.filebeat_extra_configs - reloader_extra_configs = var.reloader_extra_configs - external_dns_extra_configs = var.external_dns_extra_configs - redis_extra_configs = var.redis_extra_configs - actions_runner_controller_extra_configs = var.actions_runner_controller_extra_configs + metrics_server_extra_configs = var.metrics_server_extra_configs + cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs + karpenter_extra_configs = var.karpenter_extra_configs + aws_load_balancer_controller_extra_configs = var.aws_load_balancer_controller_extra_configs + aws_node_termination_handler_extra_configs = var.aws_node_termination_handler_extra_configs + aws_efs_csi_driver_extra_configs = var.aws_efs_csi_driver_extra_configs + aws_ebs_csi_driver_extra_configs = var.aws_ebs_csi_driver_extra_configs + calico_tigera_extra_configs = var.calico_tigera_extra_configs + istio_ingress_extra_configs = var.istio_ingress_extra_configs + kiali_server_extra_configs = var.kiali_server_extra_configs + ingress_nginx_extra_configs = var.ingress_nginx_extra_configs + kubeclarity_extra_configs = var.kubeclarity_extra_configs + fluent_bit_extra_configs = var.fluent_bit_extra_configs + velero_extra_configs = var.velero_extra_configs + 
new_relic_extra_configs = var.new_relic_extra_configs + kube_state_metrics_extra_configs = var.kube_state_metrics_extra_configs + keda_extra_configs = var.keda_extra_configs + certification_manager_extra_configs = var.certification_manager_extra_configs + external_secrets_extra_configs = var.external_secrets_extra_configs + filebeat_extra_configs = var.filebeat_extra_configs + reloader_extra_configs = var.reloader_extra_configs + external_dns_extra_configs = var.external_dns_extra_configs + redis_extra_configs = var.redis_extra_configs + actions_runner_controller_extra_configs = var.actions_runner_controller_extra_configs + prometheus_cloudwatch_exporter_extra_configs = var.prometheus_cloudwatch_exporter_extra_configs # -- Custom IAM Policy Json for Addon's ServiceAccount cluster_autoscaler_iampolicy_json_content = file("./custom-iam-policies/cluster-autoscaler.json") @@ -246,7 +248,7 @@ module "addons-internal" { depends_on = [module.eks] eks_cluster_name = module.eks.cluster_name - istio_ingress = true + istio_ingress = false istio_manifests = var.istio_manifests_internal istio_ingress_extra_configs = var.istio_ingress_extra_configs_internal } \ No newline at end of file diff --git a/_examples/complete/variables.tf b/_examples/complete/variables.tf index 7d22c9b..852a52d 100644 --- a/_examples/complete/variables.tf +++ b/_examples/complete/variables.tf @@ -208,3 +208,9 @@ variable "actions_runner_controller_extra_configs" { type = any default = {} } + +# ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ +variable "prometheus_cloudwatch_exporter_extra_configs" { + type = any + default = {} +} diff --git a/addons/fluent-bit/README.md b/addons/fluent-bit/README.md index 2a32710..440e842 100644 --- a/addons/fluent-bit/README.md +++ b/addons/fluent-bit/README.md @@ -4,7 +4,7 @@ Fluent Bit is a lightweight log processor and forwarder that you use to collect ## Installation Below terraform script shows how to use 
FluentBit Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). -```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.0.4" diff --git a/addons/ingress-nginx/README.md b/addons/ingress-nginx/README.md index 2150d7c..e9857e0 100644 --- a/addons/ingress-nginx/README.md +++ b/addons/ingress-nginx/README.md @@ -9,7 +9,7 @@ Below terraform script shows how to use Ingress Nginx Terraform Addon, A complet user can change this behaviour according to their need. They just have to change values in `/_example/complete/config/override-ingress-nginx.yaml` file. User can also add annotations according to their need or they can add their own config file by the same name. - if user wants to change `namespace`, `chart version`, `timeout`, `atomic` and other helm artributes, A complete list of artributes is also given here [here](https://github.com/clouddrove/terraform-aws-eks-addons/blob/master/addons/helm/main.tf#L3-L32). then they can change this in `/_example/complate/variable.tf` at -```bash +```hcl #--------------INGRESS NGINX------------ variable "ingress_nginx_extra_configs" { type = any @@ -17,7 +17,7 @@ variable "ingress_nginx_extra_configs" { } ``` -```bash +```hcl module "addons" { source = "../../" depends_on = [null_resource.kubectl] diff --git a/addons/istio-ingress/README.md b/addons/istio-ingress/README.md index a32fa81..882d0b3 100644 --- a/addons/istio-ingress/README.md +++ b/addons/istio-ingress/README.md @@ -4,7 +4,7 @@ Istio is a service meshβ€”a modernized service networking layer that provides a ## Installation Below terraform script shows how to use Istio-Ingress Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). 
-```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.0.1" diff --git a/addons/karpenter/README.md b/addons/karpenter/README.md index 45b008d..01420e9 100644 --- a/addons/karpenter/README.md +++ b/addons/karpenter/README.md @@ -4,7 +4,7 @@ Karpenter simplifies Kubernetes infrastructure with the right nodes at the right ## Installation Below terraform script shows how to use Karpenter Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). -```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.0.1" diff --git a/addons/keda/README.md b/addons/keda/README.md index 1640429..cbe8b41 100644 --- a/addons/keda/README.md +++ b/addons/keda/README.md @@ -4,7 +4,7 @@ KEDA allows for fine grained autoscaling (including to/from zero) for event driv ## Installation Below terraform script shows how to use Keda Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). -```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.0.9" diff --git a/addons/kiali-server/README.md b/addons/kiali-server/README.md index 1443693..19ce10b 100644 --- a/addons/kiali-server/README.md +++ b/addons/kiali-server/README.md @@ -2,7 +2,7 @@ ## Installation Below terraform script shows how to use Kiali-Server Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). 
-```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.0.1" diff --git a/addons/prometheus-cloudwatch-exporter/README.md b/addons/prometheus-cloudwatch-exporter/README.md new file mode 100644 index 0000000..35ffa2e --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/README.md @@ -0,0 +1,98 @@ +# Prometheus Cloudwatch Exporter Helm Chart + +The CloudWatch Exporter for Prometheus is a tool that allows you to export Amazon CloudWatch metrics in the Prometheus format. Amazon CloudWatch is a monitoring and observability service provided by AWS that provides metrics, logs, and traces from AWS resources and applications + +## Installation +Below terraform script describes how to use Prometheus Cloudwatch Exporter Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). +```hcl +module "addons" { + source = "clouddrove/eks-addons/aws" + version = "0.0.9" + + depends_on = [module.eks.cluster_id] + eks_cluster_name = module.eks.cluster_name + + prometheus_cloudwatch_exporter = true +} +``` + +## Configuration +[This](https://github.com/prometheus/cloudwatch_exporter#configuration) documentation can help you to configure CloudWatch exporter to get the metrics from AWS. +Configuration examples for different namespaces can be found in [these](https://github.com/prometheus/cloudwatch_exporter/blob/master/examples) examples. +A configuration builder can be found [here](https://github.com/djloude/cloudwatch_exporter_metrics_config_builder). +Configure the exporter for namespaces accordingly and use it in the `./config/override-prometheus-cloudwatch-exporter-controller.yaml` override file like this.
+ +```yaml +## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/nodegroup" + operator: In + values: + - "critical" +## Using limits and requests +resources: + limits: + cpu: 300m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi +# This config is for AWS Load balancer +config: |- + # This is the default configuration for prometheus-cloudwatch-exporter + region: eu-west-1 + period_seconds: 240 + metrics: + - aws_namespace: AWS/ELB + aws_metric_name: HealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: UnHealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: RequestCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Sum] + + - aws_namespace: AWS/ELB + aws_metric_name: Latency + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: SurgeQueueLength + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Maximum, Sum] + +``` + +## Authentication +- There are two methods to Authenticate with AWS + +### Using Secrets +- Update Access key and Secret Access keys from the config files provided in the examples. + +### Using Role (Default) +- Don't pass secret to use Role based authentication. +- A Role and Policy will be created for authentication with AWS. 
+- Pass RoleName if you have existing role for the authentication: +```hcl +prometheus_cloudwatch_exporter_extra_configs = { + role_name = "prometheus_cloudwatch_exporter_role" +} +``` +- To override the default policy create `json` format file and pass it like this: +```hcl +prometheus_cloudwatch_exporter_iampolicy_json_content = file("./custom-iam-policies/prometheus-cloudwatch-exporter.json") +``` + + + diff --git a/addons/prometheus-cloudwatch-exporter/config/prometheus-cloudwatch-exporter.yaml b/addons/prometheus-cloudwatch-exporter/config/prometheus-cloudwatch-exporter.yaml new file mode 100644 index 0000000..42a63fe --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/config/prometheus-cloudwatch-exporter.yaml @@ -0,0 +1,245 @@ +# Default values for prometheus-cloudwatch-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: prom/cloudwatch-exporter + # if not set appVersion field from Chart.yaml is used + tag: + pullPolicy: IfNotPresent + pullSecrets: + # - name: "image-pull-secret" + +# Example proxy configuration: +# command: +# - 'java' +# - '-Dhttp.proxyHost=proxy.example.com' +# - '-Dhttp.proxyPort=3128' +# - '-Dhttps.proxyHost=proxy.example.com' +# - '-Dhttps.proxyPort=3128' +# - '-jar' +# - '/cloudwatch_exporter.jar' +# - '9106' +# - '/config/config.yml' + +command: [] + +containerPort: 9106 + +service: + type: ClusterIP + port: 9106 + portName: http + annotations: {} + labels: {} + +pod: + labels: {} + annotations: {} + +# Labels and annotations to attach to the deployment resource +deployment: + labels: {} + annotations: {} + +# Extra environment variables +extraEnv: + # - name: foo + # value: baa + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +aws: + role: + # Enables usage of regional STS endpoints rather than global which is default + stsRegional: + enabled: false + + # The name of a pre-created secret in which AWS credentials are stored. When + # set, aws_access_key_id is assumed to be in a field called access_key, + # aws_secret_access_key is assumed to be in a field called secret_key, and the + # session token, if it exists, is assumed to be in a field called + # security_token + secret: + name: + includesSessionToken: false + + # Note: Do not specify the aws_access_key_id and aws_secret_access_key if you specified role or secret.name before + aws_access_key_id: + aws_secret_access_key: + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # annotations: + # Will add the provided map to the annotations for the created serviceAccount + # e.g. + # annotations: + # eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/prom-cloudwatch-exporter-oidc + # eks.amazonaws.com/sts-regional-endpoints: "true" + # Specifies whether to automount API credentials for the ServiceAccount. 
+ automountServiceAccountToken: true + +rbac: + # Specifies whether RBAC resources should be created + create: true + +# Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here +config: |- + # This is the default configuration for prometheus-cloudwatch-exporter + region: eu-west-1 + period_seconds: 240 + metrics: + - aws_namespace: AWS/ELB + aws_metric_name: HealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: UnHealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: RequestCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Sum] + + - aws_namespace: AWS/ELB + aws_metric_name: Latency + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: SurgeQueueLength + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Maximum, Sum] + + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Configurable health checks against the /healthy and /ready endpoints +livenessProbe: + path: /-/healthy + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + +readinessProbe: + path: /-/ready + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + +serviceMonitor: + # When set true then use a ServiceMonitor to configure scraping + enabled: false + # Set the namespace the ServiceMonitor should be deployed + # namespace: monitoring + # Set how frequently Prometheus should scrape + # interval: 30s + # Set path to cloudwatch-exporter telemtery-path + # telemetryPath: /metrics + # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator + # labels: + # Set timeout for scrape + # 
timeout: 10s + # Set relabelings for the ServiceMonitor, use to apply to samples before scraping + # relabelings: [] + # Set metricRelabelings for the ServiceMonitor, use to apply to samples for ingestion + # metricRelabelings: [] + # + # Example - note the Kubernetes convention of camelCase instead of Prometheus' snake_case + # metricRelabelings: + # - sourceLabels: [dbinstance_identifier] + # action: replace + # replacement: mydbname + # targetLabel: dbname + +prometheusRule: + # Specifies whether a PrometheusRule should be created + enabled: false + # Set the namespace the PrometheusRule should be deployed + # namespace: monitoring + # Set labels for the PrometheusRule, use this to define your scrape label for Prometheus Operator + # labels: + # Example - note the Kubernetes convention of camelCase instead of Prometheus' + # rules: + # - alert: ELB-Low-BurstBalance + # annotations: + # message: The ELB BurstBalance during the last 10 minutes is lower than 80%. + # expr: aws_ebs_burst_balance_average < 80 + # for: 10m + # labels: + # severity: warning + # - alert: ELB-Low-BurstBalance + # annotations: + # message: The ELB BurstBalance during the last 10 minutes is lower than 50%. + # expr: aws_ebs_burst_balance_average < 50 + # for: 10m + # labels: + # severity: warning + # - alert: ELB-Low-BurstBalance + # annotations: + # message: The ELB BurstBalance during the last 10 minutes is lower than 30%. 
+ # expr: aws_ebs_burst_balance_average < 30 + # for: 10m + # labels: + # severity: critical + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + # pathType is only for k8s >= 1.18 + pathType: Prefix + +securityContext: + runAsUser: 65534 # run as nobody user instead of root + fsGroup: 65534 # necessary to be able to read the EKS IAM token + +containerSecurityContext: {} + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + +# Leverage a PriorityClass to ensure your pods survive resource shortages +# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: system-cluster-critical +priorityClassName: "" diff --git a/addons/prometheus-cloudwatch-exporter/locals.tf b/addons/prometheus-cloudwatch-exporter/locals.tf new file mode 100644 index 0000000..27ad1a8 --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/locals.tf @@ -0,0 +1,44 @@ +locals { + name = "prometheus-cloudwatch-exporter" + + default_helm_config = { + name = try(var.prometheus_cloudwatch_exporter_extra_configs.name, local.name) + chart = try(var.prometheus_cloudwatch_exporter_extra_configs.chart, local.name) + repository = try(var.prometheus_cloudwatch_exporter_extra_configs.repository, "https://prometheus-community.github.io/helm-charts") + version = try(var.prometheus_cloudwatch_exporter_extra_configs.version, "0.25.2") + namespace = try(var.prometheus_cloudwatch_exporter_extra_configs.namespace, "monitoring") + create_namespace = 
try(var.prometheus_cloudwatch_exporter_extra_configs.create_namespace, true) + description = "Prometheus Cloudwatch-Exporter helm Chart deployment configuration" + timeout = try(var.prometheus_cloudwatch_exporter_extra_configs.timeout, "600") + lint = try(var.prometheus_cloudwatch_exporter_extra_configs.lint, "false") + repository_key_file = try(var.prometheus_cloudwatch_exporter_extra_configs.repository_key_file, "") + repository_cert_file = try(var.prometheus_cloudwatch_exporter_extra_configs.repository_cert_file, "") + repository_username = try(var.prometheus_cloudwatch_exporter_extra_configs.repository_username, "") + repository_password = try(var.prometheus_cloudwatch_exporter_extra_configs.repository_password, "") + verify = try(var.prometheus_cloudwatch_exporter_extra_configs.verify, "false") + keyring = try(var.prometheus_cloudwatch_exporter_extra_configs.keyring, "") + disable_webhooks = try(var.prometheus_cloudwatch_exporter_extra_configs.disable_webhooks, "false") + reuse_values = try(var.prometheus_cloudwatch_exporter_extra_configs.reuse_values, "false") + reset_values = try(var.prometheus_cloudwatch_exporter_extra_configs.reset_values, "false") + force_update = try(var.prometheus_cloudwatch_exporter_extra_configs.force_update, "false") + recreate_pods = try(var.prometheus_cloudwatch_exporter_extra_configs.recreate_pods, "false") + cleanup_on_fail = try(var.prometheus_cloudwatch_exporter_extra_configs.cleanup_on_fail, "false") + max_history = try(var.prometheus_cloudwatch_exporter_extra_configs.max_history, "0") + atomic = try(var.prometheus_cloudwatch_exporter_extra_configs.atomic, "false") + skip_crds = try(var.prometheus_cloudwatch_exporter_extra_configs.skip_crds, "false") + render_subchart_notes = try(var.prometheus_cloudwatch_exporter_extra_configs.render_subchart_notes, "true") + disable_openapi_validation = try(var.prometheus_cloudwatch_exporter_extra_configs.disable_openapi_validation, "false") + wait = 
try(var.prometheus_cloudwatch_exporter_extra_configs.wait, "true") + wait_for_jobs = try(var.prometheus_cloudwatch_exporter_extra_configs.wait_for_jobs, "false") + dependency_update = try(var.prometheus_cloudwatch_exporter_extra_configs.dependency_update, "false") + replace = try(var.prometheus_cloudwatch_exporter_extra_configs.replace, "false") + } + + helm_config = merge( + local.default_helm_config, + var.helm_config + ) + + role_name = try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "${local.name}-${var.eks_cluster_name}-role") + policy_name = "${local.name}-${var.eks_cluster_name}-policy" +} diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf new file mode 100644 index 0000000..e815ad9 --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -0,0 +1,131 @@ +module "prometheus_cloudwatch_exporter_secret" { + count = length(var.secret_manifest) + source = "../helm" + + manage_via_gitops = var.manage_via_gitops + helm_config = local.helm_config + addon_context = var.addon_context + + set_values = [ + { + name = "aws.secret.name" + value = "aws" + } + ] + + depends_on = [resource.kubectl_manifest.secret_manifest] +} + +module "prometheus_cloudwatch_exporter_role" { + # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 + count = var.secret_manifest == [] ? 
1 : 0 + source = "../helm" + + manage_via_gitops = var.manage_via_gitops + helm_config = local.helm_config + addon_context = var.addon_context + + set_values = [ + { + name = "aws.role" + value = local.role_name + } + ] + + depends_on = [resource.kubectl_manifest.secret_manifest] +} + +# Secret for AWS Authentication with cloudwatch exporter +resource "kubectl_manifest" "secret_manifest" { + count = length(var.secret_manifest) + yaml_body = file(var.secret_manifest[count.index]) +} + +# Role for AWS Authentication +data "aws_iam_policy_document" "role" { + # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 + count = var.secret_manifest == [] ? 1 : 0 + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["eks.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "role" { + # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 + count = var.secret_manifest == [] ? 1 : 0 + name = local.role_name + assume_role_policy = data.aws_iam_policy_document.role[0].json +} + +# Policy of the Role +resource "aws_iam_policy" "policy" { + # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 + count = var.secret_manifest == [] ? 1 : 0 + name = local.policy_name + path = "/" + description = "IAM Policy used by ${local.name}-${var.eks_cluster_name} IAM Role" + policy = var.iampolicy_json_content != null ? 
var.iampolicy_json_content : <<-EOT +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowReadingMetricsFromCloudWatch", + "Effect": "Allow", + "Action": [ + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricData", + "cloudwatch:GetInsightRuleReport", + "cloudwatch:GetMetricStatistics" + ], + "Resource": "*" + }, + { + "Sid": "AllowReadingLogsFromCloudWatch", + "Effect": "Allow", + "Action": [ + "logs:DescribeLogGroups", + "logs:GetLogGroupFields", + "logs:StartQuery", + "logs:StopQuery", + "logs:GetQueryResults", + "logs:GetLogEvents" + ], + "Resource": "*" + }, + { + "Sid": "AllowReadingTagsInstancesRegionsFromEC2", + "Effect": "Allow", + "Action": [ + "ec2:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions" + ], + "Resource": "*" + }, + { + "Sid": "AllowReadingResourcesForTags", + "Effect": "Allow", + "Action": "tag:GetResources", + "Resource": "*" + } + ] +} +EOT +} + +# Policy Attachment with Role +resource "aws_iam_role_policy_attachment" "prometheus_cloudwatch_exporter_policy" { + # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 + count = var.secret_manifest == [] ? 
1 : 0 + policy_arn = aws_iam_policy.policy[0].arn + role = aws_iam_role.role[0].name +} \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/outputs.tf b/addons/prometheus-cloudwatch-exporter/outputs.tf new file mode 100644 index 0000000..a3832b1 --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/outputs.tf @@ -0,0 +1,11 @@ +output "namespace" { + value = local.default_helm_config.namespace +} + +output "chart_version" { + value = local.default_helm_config.version +} + +output "repository" { + value = local.default_helm_config.repository +} \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/variables.tf b/addons/prometheus-cloudwatch-exporter/variables.tf new file mode 100644 index 0000000..c0c2246 --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/variables.tf @@ -0,0 +1,49 @@ +variable "helm_config" { + description = "Helm provider config for Prometheus Cloudwatch Exporter" + type = any + default = {} +} + +variable "manage_via_gitops" { + description = "Determines if the add-on should be managed via GitOps" + type = bool + default = false +} + +variable "addon_context" { + description = "Input configuration for the addon" + type = object({ + aws_caller_identity_account_id = string + aws_caller_identity_arn = string + aws_eks_cluster_endpoint = string + aws_partition_id = string + aws_region_name = string + eks_cluster_id = string + eks_oidc_issuer_url = string + eks_oidc_provider_arn = string + tags = map(string) + }) +} + +variable "prometheus_cloudwatch_exporter_extra_configs" { + description = "Override attributes of helm_release terraform resource" + type = any + default = {} +} + +variable "secret_manifest" { + description = "Path of Ingress and Gateway yaml manifests" + type = list(any) + default = [] +} + +variable "eks_cluster_name" { + type = string + default = "" +} + +variable "iampolicy_json_content" { + description = "Custom IAM Policy for Prometheus Cloudwatch Exporter's Role" + 
type = string + default = null +} \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/versions.tf b/addons/prometheus-cloudwatch-exporter/versions.tf new file mode 100644 index 0000000..dc04845 --- /dev/null +++ b/addons/prometheus-cloudwatch-exporter/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.23" + } + aws = { + source = "hashicorp/aws" + version = ">= 5.29" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.12" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.13.0" + } + } +} diff --git a/addons/reloader/README.md b/addons/reloader/README.md index 7139920..21a3d5b 100644 --- a/addons/reloader/README.md +++ b/addons/reloader/README.md @@ -5,7 +5,7 @@ Reloader manages the following AWS resources ## Installation Below terraform script shows how to use Reloader Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). -```bash +```hcl module "addons" { source = "clouddrove/eks-addons/aws" version = "0.1.0" diff --git a/main.tf b/main.tf index a80272b..2d4d4e3 100644 --- a/main.tf +++ b/main.tf @@ -246,4 +246,16 @@ module "actions_runner_controller" { manage_via_gitops = var.manage_via_gitops addon_context = local.addon_context actions_runner_controller_extra_configs = var.actions_runner_controller_extra_configs +} + +module "prometheus_cloudwatch_exporter" { + count = var.prometheus_cloudwatch_exporter ? 1 : 0 + source = "./addons/prometheus-cloudwatch-exporter" + helm_config = var.prometheus_cloudwatch_exporter_helm_config != null ? 
var.prometheus_cloudwatch_exporter_helm_config : { values = [local_file.prometheus_cloudwatch_exporter_helm_config[count.index].content] } + manage_via_gitops = var.manage_via_gitops + addon_context = local.addon_context + prometheus_cloudwatch_exporter_extra_configs = var.prometheus_cloudwatch_exporter_extra_configs + secret_manifest = var.prometheus_cloudwatch_exporter_secret_manifest + eks_cluster_name = data.aws_eks_cluster.eks_cluster.name + iampolicy_json_content = var.prometheus_cloudwatch_exporter_role_iampolicy_json_content } \ No newline at end of file diff --git a/override_values.tf b/override_values.tf index a501a31..e33d2a2 100644 --- a/override_values.tf +++ b/override_values.tf @@ -835,4 +835,31 @@ resources: EOT filename = "${path.module}/override_values/actions_runner_controller.yaml" +} + +#-----------PROMETHEUS-CLOUDWATCH-EXPORTER-------------------- +resource "local_file" "prometheus_cloudwatch_exporter_helm_config" { + count = var.prometheus_cloudwatch_exporter && (var.prometheus_cloudwatch_exporter_helm_config == null) ? 
1 : 0 + content = < Date: Thu, 28 Dec 2023 22:54:19 +0530 Subject: [PATCH 02/13] fix: fixed syntax and conditional error in role based authentication for prometheus cloudwatch exporter --- _examples/complete/main.tf | 44 ++++++++++++++++----------------- _examples/complete/variables.tf | 4 ++- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index b423a2a..290e867 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -152,35 +152,35 @@ module "addons" { eks_cluster_name = module.eks.cluster_name # -- Enable Addons - metrics_server = false - cluster_autoscaler = false - aws_load_balancer_controller = false + metrics_server = true + cluster_autoscaler = true + aws_load_balancer_controller = true aws_node_termination_handler = true - aws_efs_csi_driver = false - aws_ebs_csi_driver = false - kube_state_metrics = false + aws_efs_csi_driver = true + aws_ebs_csi_driver = true + kube_state_metrics = true karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. 
- calico_tigera = false - new_relic = false - kubeclarity = false - ingress_nginx = false - fluent_bit = false - velero = false - keda = false - certification_manager = false - filebeat = false - reloader = false - external_dns = false - redis = false - actions_runner_controller = false + calico_tigera = true + new_relic = true + kubeclarity = true + ingress_nginx = true + fluent_bit = true + velero = true + keda = true + certification_manager = true + filebeat = true + reloader = true + external_dns = true + redis = true + actions_runner_controller = true prometheus_cloudwatch_exporter = true # -- Addons with mandatory variable - istio_ingress = false + istio_ingress = true istio_manifests = var.istio_manifests - kiali_server = false + kiali_server = true kiali_manifests = var.kiali_manifests - external_secrets = false + external_secrets = true # -- Path of override-values.yaml file metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } diff --git a/_examples/complete/variables.tf b/_examples/complete/variables.tf index 852a52d..b5bcf01 100644 --- a/_examples/complete/variables.tf +++ b/_examples/complete/variables.tf @@ -212,5 +212,7 @@ variable "actions_runner_controller_extra_configs" { # ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ variable "prometheus_cloudwatch_exporter_extra_configs" { type = any - default = {} + default = { + role_name = "" + } } From 5fcd8a2ad66956740967e2c109b8ace0726bf69b Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Thu, 28 Dec 2023 22:55:37 +0530 Subject: [PATCH 03/13] fix- pulled from master branch --- .../prometheus-cloudwatch-exporter/README.md | 2 +- .../prometheus-cloudwatch-exporter/locals.tf | 2 +- addons/prometheus-cloudwatch-exporter/main.tf | 21 +++++++------------ .../variables.tf | 4 +++- variables.tf | 4 +++- 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/addons/prometheus-cloudwatch-exporter/README.md 
b/addons/prometheus-cloudwatch-exporter/README.md index 35ffa2e..b4c6e89 100644 --- a/addons/prometheus-cloudwatch-exporter/README.md +++ b/addons/prometheus-cloudwatch-exporter/README.md @@ -17,7 +17,7 @@ module "addons" { ``` ## Configuration -[This] documentation can help you to configure CloudWatch exporter to get the metrics from AWS. +This documentation can help you to configure CloudWatch exporter to get the metrics from AWS. Configuration examples for different namespaces can be found in [this](https://github.com/prometheus/cloudwatch_exporter/blob/master/examples) examples. A configuration builder can be found [here](https://github.com/djloude/cloudwatch_exporter_metrics_config_builder). Configure the exporter for namespaces accordingly and use it in the `./config/override-prometheus-cloudwatch-exporter-controller.yaml` override file like this. diff --git a/addons/prometheus-cloudwatch-exporter/locals.tf b/addons/prometheus-cloudwatch-exporter/locals.tf index 27ad1a8..023ca21 100644 --- a/addons/prometheus-cloudwatch-exporter/locals.tf +++ b/addons/prometheus-cloudwatch-exporter/locals.tf @@ -39,6 +39,6 @@ locals { var.helm_config ) - role_name = try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "${local.name}-${var.eks_cluster_name}-role") + role_name = coalesce(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "${local.name}-${var.eks_cluster_name}-role") policy_name = "${local.name}-${var.eks_cluster_name}-policy" } diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index e815ad9..d3a8b6c 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -13,12 +13,12 @@ module "prometheus_cloudwatch_exporter_secret" { } ] - depends_on = [resource.kubectl_manifest.secret_manifest] + depends_on = [kubectl_manifest.secret_manifest] } module "prometheus_cloudwatch_exporter_role" { - # count = var.secret_manifest == [] && 
try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 - count = var.secret_manifest == [] ? 1 : 0 + count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 + # count = var.secret_manifest == [] ? 1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops @@ -31,8 +31,7 @@ module "prometheus_cloudwatch_exporter_role" { value = local.role_name } ] - - depends_on = [resource.kubectl_manifest.secret_manifest] + depends_on = [kubectl_manifest.secret_manifest] } # Secret for AWS Authentication with cloudwatch exporter @@ -43,8 +42,7 @@ resource "kubectl_manifest" "secret_manifest" { # Role for AWS Authentication data "aws_iam_policy_document" "role" { - # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 - count = var.secret_manifest == [] ? 1 : 0 + count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 statement { effect = "Allow" actions = ["sts:AssumeRole"] @@ -57,16 +55,14 @@ data "aws_iam_policy_document" "role" { } resource "aws_iam_role" "role" { - # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 - count = var.secret_manifest == [] ? 1 : 0 + count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 name = local.role_name assume_role_policy = data.aws_iam_policy_document.role[0].json } # Policy of the Role resource "aws_iam_policy" "policy" { - # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 - count = var.secret_manifest == [] ? 1 : 0 + count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 
1 : 0 name = local.policy_name path = "/" description = "IAM Policy used by ${local.name}-${var.eks_cluster_name} IAM Role" @@ -124,8 +120,7 @@ EOT # Policy Attachment with Role resource "aws_iam_role_policy_attachment" "prometheus_cloudwatch_exporter_policy" { - # count = var.secret_manifest == [] && try(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "") != "" ? 1 : 0 - count = var.secret_manifest == [] ? 1 : 0 + count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 policy_arn = aws_iam_policy.policy[0].arn role = aws_iam_role.role[0].name } \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/variables.tf b/addons/prometheus-cloudwatch-exporter/variables.tf index c0c2246..096066a 100644 --- a/addons/prometheus-cloudwatch-exporter/variables.tf +++ b/addons/prometheus-cloudwatch-exporter/variables.tf @@ -28,7 +28,9 @@ variable "addon_context" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = {} + default = { + role_name = null + } } variable "secret_manifest" { diff --git a/variables.tf b/variables.tf index cf5000a..9ed4978 100644 --- a/variables.tf +++ b/variables.tf @@ -584,7 +584,9 @@ variable "prometheus_cloudwatch_exporter_helm_config" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = {} + default = { + role_name = "" + } } variable "prometheus_cloudwatch_exporter_secret_manifest" { From 318d9cebaf23e7d1c2dc52b60fa3d810661a542f Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Thu, 28 Dec 2023 23:02:15 +0530 Subject: [PATCH 04/13] feat: added prometheus cloudwatch exporter addon in external-eks example --- ...etheus-cloudwatch-exporter-controller.yaml | 51 +++++++ .../secret.yaml | 10 ++ _examples/external-eks/main.tf | 132 +++++++++--------- 
_examples/external-eks/variables.tf | 10 +- 4 files changed, 137 insertions(+), 66 deletions(-) create mode 100644 _examples/external-eks/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml create mode 100644 _examples/external-eks/config/prometheus-cloudwatch-exporter/secret.yaml diff --git a/_examples/external-eks/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml b/_examples/external-eks/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml new file mode 100644 index 0000000..1b0d32a --- /dev/null +++ b/_examples/external-eks/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml @@ -0,0 +1,51 @@ +## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/nodegroup" + operator: In + values: + - "critical" + +## Using limits and requests +resources: + limits: + cpu: 300m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi + +# Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here +config: |- + # This is the default configuration for prometheus-cloudwatch-exporter + region: eu-west-1 + period_seconds: 240 + metrics: + - aws_namespace: AWS/ELB + aws_metric_name: HealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: UnHealthyHostCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: RequestCount + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Sum] + + - aws_namespace: AWS/ELB + aws_metric_name: Latency + aws_dimensions: [AvailabilityZone, LoadBalancerName] + 
aws_statistics: [Average] + + - aws_namespace: AWS/ELB + aws_metric_name: SurgeQueueLength + aws_dimensions: [AvailabilityZone, LoadBalancerName] + aws_statistics: [Maximum, Sum] diff --git a/_examples/external-eks/config/prometheus-cloudwatch-exporter/secret.yaml b/_examples/external-eks/config/prometheus-cloudwatch-exporter/secret.yaml new file mode 100644 index 0000000..13df3ad --- /dev/null +++ b/_examples/external-eks/config/prometheus-cloudwatch-exporter/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: aws + namespace: monitoring # Namespace of Prometheus Cloudwatch Exporter addon destination +type: Opaque +data: + access_key: QUtJQVdGV0VLSlBTQU9INVlIRlQK # Encoded AWS Access key - Enter Correct AWS Access Key Encoded with base64 + secret_key: SjZLVDRTSkZIVG9leTQ1M2hadllmMWZpR2pYa0l1UkFmYkhLRHpUdAo= # Encoded AWS Secret Access key - Enter Correct AWS Secret Access Key Encoded with base64 + # Refer https://www.baeldung.com/linux/cli-base64-encode-decode this URL to Encode and Decode of String in Base64 \ No newline at end of file diff --git a/_examples/external-eks/main.tf b/_examples/external-eks/main.tf index dbdfc6a..cd8c437 100644 --- a/_examples/external-eks/main.tf +++ b/_examples/external-eks/main.tf @@ -13,26 +13,26 @@ module "addons" { eks_cluster_name = local.name # -- Enable Addons - metrics_server = true - cluster_autoscaler = true - aws_load_balancer_controller = true - aws_node_termination_handler = true - aws_efs_csi_driver = true - aws_ebs_csi_driver = true - kube_state_metrics = true - karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. 
- calico_tigera = true - new_relic = true - kubeclarity = true - ingress_nginx = true - fluent_bit = true - velero = true - keda = true - certification_manager = true - filebeat = true - reloader = true - redis = true - + metrics_server = true + cluster_autoscaler = true + aws_load_balancer_controller = true + aws_node_termination_handler = true + aws_efs_csi_driver = true + aws_ebs_csi_driver = true + kube_state_metrics = true + karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. + calico_tigera = true + new_relic = true + kubeclarity = true + ingress_nginx = true + fluent_bit = true + velero = true + keda = true + certification_manager = true + filebeat = true + reloader = true + redis = true + prometheus_cloudwatch_exporter = true # -- Addons with mandatory variable istio_ingress = true @@ -42,53 +42,55 @@ module "addons" { external_secrets = true # -- Path of override-values.yaml file - metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } - cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } - karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } - aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } - aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } - aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } - aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } - calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } - istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } - kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } - external_secrets_helm_config = { values = 
[file("./config/external-secret/override-values.yaml")] } - ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } - kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } - fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } - velero_helm_config = { values = [file("./config/override-velero.yaml")] } - new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } - kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } - keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } - certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } - filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } - reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } - redis_helm_config = { values = [file("./config/override-redis.yaml")] } + metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } + cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } + karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } + aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } + aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } + aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } + aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } + calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } + istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } + kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } + 
external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } + ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } + kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } + fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } + velero_helm_config = { values = [file("./config/override-velero.yaml")] } + new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } + kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } + keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } + certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } + filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } + reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } + redis_helm_config = { values = [file("./config/override-redis.yaml")] } + prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } + prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] # -- Override Helm Release attributes - metrics_server_extra_configs = var.metrics_server_extra_configs - cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs - karpenter_extra_configs = var.karpenter_extra_configs - aws_load_balancer_controller_extra_configs = var.aws_load_balancer_controller_extra_configs - aws_node_termination_handler_extra_configs = var.aws_node_termination_handler_extra_configs - aws_efs_csi_driver_extra_configs = var.aws_efs_csi_driver_extra_configs - aws_ebs_csi_driver_extra_configs = var.aws_ebs_csi_driver_extra_configs - calico_tigera_extra_configs = var.calico_tigera_extra_configs - istio_ingress_extra_configs = 
var.istio_ingress_extra_configs - kiali_server_extra_configs = var.kiali_server_extra_configs - ingress_nginx_extra_configs = var.ingress_nginx_extra_configs - kubeclarity_extra_configs = var.kubeclarity_extra_configs - fluent_bit_extra_configs = var.fluent_bit_extra_configs - velero_extra_configs = var.velero_extra_configs - new_relic_extra_configs = var.new_relic_extra_configs - kube_state_metrics_extra_configs = var.kube_state_metrics_extra_configs - keda_extra_configs = var.keda_extra_configs - certification_manager_extra_configs = var.certification_manager_extra_configs - external_secrets_extra_configs = var.external_secrets_extra_configs - filebeat_extra_configs = var.filebeat_extra_configs - reloader_extra_configs = var.reloader_extra_configs - redis_extra_configs = var.redis_extra_configs - + metrics_server_extra_configs = var.metrics_server_extra_configs + cluster_autoscaler_extra_configs = var.cluster_autoscaler_extra_configs + karpenter_extra_configs = var.karpenter_extra_configs + aws_load_balancer_controller_extra_configs = var.aws_load_balancer_controller_extra_configs + aws_node_termination_handler_extra_configs = var.aws_node_termination_handler_extra_configs + aws_efs_csi_driver_extra_configs = var.aws_efs_csi_driver_extra_configs + aws_ebs_csi_driver_extra_configs = var.aws_ebs_csi_driver_extra_configs + calico_tigera_extra_configs = var.calico_tigera_extra_configs + istio_ingress_extra_configs = var.istio_ingress_extra_configs + kiali_server_extra_configs = var.kiali_server_extra_configs + ingress_nginx_extra_configs = var.ingress_nginx_extra_configs + kubeclarity_extra_configs = var.kubeclarity_extra_configs + fluent_bit_extra_configs = var.fluent_bit_extra_configs + velero_extra_configs = var.velero_extra_configs + new_relic_extra_configs = var.new_relic_extra_configs + kube_state_metrics_extra_configs = var.kube_state_metrics_extra_configs + keda_extra_configs = var.keda_extra_configs + certification_manager_extra_configs = 
var.certification_manager_extra_configs + external_secrets_extra_configs = var.external_secrets_extra_configs + filebeat_extra_configs = var.filebeat_extra_configs + reloader_extra_configs = var.reloader_extra_configs + redis_extra_configs = var.redis_extra_configs + prometheus_cloudwatch_exporter_extra_configs = var.prometheus_cloudwatch_exporter_extra_configs # -- Custom IAM Policy Json for Addon's ServiceAccount external_secrets_iampolicy_json_content = file("./custom-iam-policies/external-secrets.json") diff --git a/_examples/external-eks/variables.tf b/_examples/external-eks/variables.tf index dcc64dd..47185ed 100644 --- a/_examples/external-eks/variables.tf +++ b/_examples/external-eks/variables.tf @@ -171,4 +171,12 @@ variable "redis_extra_configs" { atomic = true timeout = 300 } -} \ No newline at end of file +} + +# ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ +variable "prometheus_cloudwatch_exporter_extra_configs" { + type = any + default = { + role_name = "" + } +} From 13754c494af46ddfdbb4aaf2ce2e75985babdd2c Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Thu, 28 Dec 2023 23:54:37 +0530 Subject: [PATCH 05/13] fix- terraform code format command ran --- _examples/complete/variables.tf | 4 ++-- _examples/external-eks/variables.tf | 4 ++-- addons/prometheus-cloudwatch-exporter/main.tf | 5 ++--- addons/prometheus-cloudwatch-exporter/variables.tf | 2 +- variables.tf | 2 +- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/_examples/complete/variables.tf b/_examples/complete/variables.tf index b5bcf01..049747e 100644 --- a/_examples/complete/variables.tf +++ b/_examples/complete/variables.tf @@ -211,8 +211,8 @@ variable "actions_runner_controller_extra_configs" { # ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ variable "prometheus_cloudwatch_exporter_extra_configs" { - type = any - default = { + type = any + default = { role_name = 
"" } } diff --git a/_examples/external-eks/variables.tf b/_examples/external-eks/variables.tf index 47185ed..dc3d0bc 100644 --- a/_examples/external-eks/variables.tf +++ b/_examples/external-eks/variables.tf @@ -175,8 +175,8 @@ variable "redis_extra_configs" { # ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ variable "prometheus_cloudwatch_exporter_extra_configs" { - type = any - default = { + type = any + default = { role_name = "" } } diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index d3a8b6c..fc8c2f5 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -17,8 +17,7 @@ module "prometheus_cloudwatch_exporter_secret" { } module "prometheus_cloudwatch_exporter_role" { - count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 - # count = var.secret_manifest == [] ? 1 : 0 + count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops @@ -42,7 +41,7 @@ resource "kubectl_manifest" "secret_manifest" { # Role for AWS Authentication data "aws_iam_policy_document" "role" { - count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 + count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 
1 : 0 statement { effect = "Allow" actions = ["sts:AssumeRole"] diff --git a/addons/prometheus-cloudwatch-exporter/variables.tf b/addons/prometheus-cloudwatch-exporter/variables.tf index 096066a..2eb5937 100644 --- a/addons/prometheus-cloudwatch-exporter/variables.tf +++ b/addons/prometheus-cloudwatch-exporter/variables.tf @@ -28,7 +28,7 @@ variable "addon_context" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = { + default = { role_name = null } } diff --git a/variables.tf b/variables.tf index 9ed4978..bcb7dfd 100644 --- a/variables.tf +++ b/variables.tf @@ -584,7 +584,7 @@ variable "prometheus_cloudwatch_exporter_helm_config" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = { + default = { role_name = "" } } From 4f1ef307892f7e8393a54d2b32ffcca210490c1a Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Thu, 28 Dec 2023 23:57:00 +0530 Subject: [PATCH 06/13] fix- terraform code format command ran --- addons/prometheus-cloudwatch-exporter/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index fc8c2f5..363cf52 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -17,7 +17,7 @@ module "prometheus_cloudwatch_exporter_secret" { } module "prometheus_cloudwatch_exporter_role" { - count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 + count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 
1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops From 69b528e03768a3e9951e2cfddf06c7909ca0d58c Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Fri, 29 Dec 2023 00:46:05 +0530 Subject: [PATCH 07/13] feat: creating namespace in terraform before secrets --- ...etheus-cloudwatch-exporter-controller.yaml | 102 +++++++++++++----- addons/prometheus-cloudwatch-exporter/main.tf | 13 ++- 2 files changed, 86 insertions(+), 29 deletions(-) diff --git a/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml b/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml index 1b0d32a..8b1661e 100644 --- a/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml +++ b/_examples/complete/config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml @@ -22,30 +22,80 @@ resources: # Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here config: |- # This is the default configuration for prometheus-cloudwatch-exporter - region: eu-west-1 - period_seconds: 240 + region: us-east-1 metrics: - - aws_namespace: AWS/ELB - aws_metric_name: HealthyHostCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: UnHealthyHostCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: RequestCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Sum] - - - aws_namespace: AWS/ELB - aws_metric_name: Latency - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: SurgeQueueLength - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Maximum, Sum] + - 
aws_dimensions: + - InstanceId + aws_metric_name: CPUUtilization + aws_namespace: AWS/EC2 + aws_statistics: + - Average + aws_tag_select: + resource_type_selection: ec2:instance + resource_id_dimension: InstanceId + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskWriteBytes + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskReadBytes + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: CPUCreditBalance + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: CPUCreditUsage + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_Instance + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_System + aws_namespace: AWS/EC2 + aws_statistics: + - Sum \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index 363cf52..8c1a0e7 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -30,13 +30,20 @@ module "prometheus_cloudwatch_exporter_role" { value = local.role_name } ] - depends_on = 
[kubectl_manifest.secret_manifest] + depends_on = [module.prometheus_cloudwatch_exporter_secret] +} + +resource "kubernetes_namespace" "prometheus_cloudwatch_exporter_namespace" { + metadata { + name = local.default_helm_config.namespace + } } # Secret for AWS Authentication with cloudwatch exporter resource "kubectl_manifest" "secret_manifest" { - count = length(var.secret_manifest) - yaml_body = file(var.secret_manifest[count.index]) + count = length(var.secret_manifest) + yaml_body = file(var.secret_manifest[count.index]) + depends_on = [kubernetes_namespace.prometheus_cloudwatch_exporter_namespace] } # Role for AWS Authentication From 2f062f050cbbe115a4da004479d094f95792416a Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Sat, 6 Jan 2024 13:22:16 +0530 Subject: [PATCH 08/13] fix- ran fmt format command --- _examples/complete/main.tf | 48 +++++++++++++++++----------------- _examples/external-eks/main.tf | 6 ++--- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index 2f440e1..b948fc0 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -152,28 +152,28 @@ module "addons" { eks_cluster_name = module.eks.cluster_name # -- Enable Addons - metrics_server = true - cluster_autoscaler = true - aws_load_balancer_controller = true - aws_node_termination_handler = true - aws_efs_csi_driver = true - aws_ebs_csi_driver = true - kube_state_metrics = true - karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. 
- calico_tigera = true - new_relic = true - kubeclarity = true - ingress_nginx = true - fluent_bit = true - velero = true - keda = true - certification_manager = true - filebeat = true - reloader = true - external_dns = true - redis = true - actions_runner_controller = true - prometheus = true + metrics_server = true + cluster_autoscaler = true + aws_load_balancer_controller = true + aws_node_termination_handler = true + aws_efs_csi_driver = true + aws_ebs_csi_driver = true + kube_state_metrics = true + karpenter = false # -- Set to `false` or comment line to Uninstall Karpenter if installed using terraform. + calico_tigera = true + new_relic = true + kubeclarity = true + ingress_nginx = true + fluent_bit = true + velero = true + keda = true + certification_manager = true + filebeat = true + reloader = true + external_dns = true + redis = true + actions_runner_controller = true + prometheus = true prometheus_cloudwatch_exporter = true @@ -215,7 +215,7 @@ module "addons" { external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } redis_helm_config = { values = [file("./config/override-redis.yaml")] } actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } - prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } + prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] @@ -244,7 +244,7 @@ module "addons" { external_dns_extra_configs = var.external_dns_extra_configs redis_extra_configs = var.redis_extra_configs actions_runner_controller_extra_configs = var.actions_runner_controller_extra_configs - prometheus_extra_configs = var.prometheus_extra_configs + 
prometheus_extra_configs = var.prometheus_extra_configs prometheus_cloudwatch_exporter_extra_configs = var.prometheus_cloudwatch_exporter_extra_configs # -- Custom IAM Policy Json for Addon's ServiceAccount diff --git a/_examples/external-eks/main.tf b/_examples/external-eks/main.tf index ee084c6..e07b598 100644 --- a/_examples/external-eks/main.tf +++ b/_examples/external-eks/main.tf @@ -32,7 +32,7 @@ module "addons" { filebeat = true reloader = true redis = true - prometheus = true + prometheus = true prometheus_cloudwatch_exporter = true # Grafana Deployment @@ -71,7 +71,7 @@ module "addons" { filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } redis_helm_config = { values = [file("./config/override-redis.yaml")] } - prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } + prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] @@ -98,7 +98,7 @@ module "addons" { filebeat_extra_configs = var.filebeat_extra_configs reloader_extra_configs = var.reloader_extra_configs redis_extra_configs = var.redis_extra_configs - prometheus_extra_configs = var.prometheus_extra_configs + prometheus_extra_configs = var.prometheus_extra_configs prometheus_cloudwatch_exporter_extra_configs = var.prometheus_cloudwatch_exporter_extra_configs # -- Custom IAM Policy Json for Addon's ServiceAccount From 77e631400ac4838d850c762850f47de91b64675a Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Mon, 5 Feb 2024 14:41:21 +0530 Subject: [PATCH 09/13] feat: fixed authentication for cloudwatch exporter addon --- .../config/grafana/override-grafana.yaml | 13 + 
.../complete/config/override-prometheus.yaml | 435 +++++++++++++++++- .../secret.yaml | 4 +- _examples/complete/locals.tf | 3 +- _examples/complete/main.tf | 57 ++- _examples/complete/variables.tf | 7 +- _examples/external-eks/variables.tf | 6 +- .../prometheus-cloudwatch-exporter/README.md | 127 +++-- .../prometheus-cloudwatch-exporter/locals.tf | 3 - addons/prometheus-cloudwatch-exporter/main.tf | 91 ++-- .../variables.tf | 4 +- addons/prometheus/README.md | 4 + variables.tf | 5 +- 13 files changed, 601 insertions(+), 158 deletions(-) diff --git a/_examples/complete/config/grafana/override-grafana.yaml b/_examples/complete/config/grafana/override-grafana.yaml index de95717..f33d35d 100644 --- a/_examples/complete/config/grafana/override-grafana.yaml +++ b/_examples/complete/config/grafana/override-grafana.yaml @@ -14,3 +14,16 @@ resources: requests: cpu: 50m memory: 150Mi + +# Uncomment belew code to use Prometheus Data Source for Grafana Dashboard as default +# datasources: +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-server.monitoring.svc.cluster.local:80 +# access: proxy +# isDefault: true +# uid: prometheus +# editable: true \ No newline at end of file diff --git a/_examples/complete/config/override-prometheus.yaml b/_examples/complete/config/override-prometheus.yaml index e83cbcf..ebe0704 100644 --- a/_examples/complete/config/override-prometheus.yaml +++ b/_examples/complete/config/override-prometheus.yaml @@ -33,4 +33,437 @@ prometheus-node-exporter: # Dependency for prometheus server enabled: true prometheus-pushgateway: # Dependency for prometheus server - enabled: true \ No newline at end of file + enabled: true + +serverFiles: + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - 
job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Metric relabel configs to apply to samples before ingestion. + # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + # metric_relabel_configs: + # - action: labeldrop + # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. 
+ # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. 
+ # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + honor_labels: true + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, + # except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) 
+ target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Example Scrape config for pods which should be scraped slower. An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
+ - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # This is Scrape config for Prometheus Cloudwatch Exporter service to get scrape + # Uncomment Below Code to use prometheus Cloudwatch Exporter as a scrape config for prometheus + + # - job_name: cloudwatch-exporter + # static_configs: + # - targets: + # - 'prometheus-cloudwatch-exporter.monitoring.svc.cluster.local:9106' + # labels: + # csp: 'aws' + # cn: '' + # account_id: '' \ No newline at end of file diff --git 
a/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml b/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml index 13df3ad..434cb78 100644 --- a/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml +++ b/_examples/complete/config/prometheus-cloudwatch-exporter/secret.yaml @@ -5,6 +5,6 @@ metadata: namespace: monitoring # Namespace of Prometheus Cloudwatch Exporter addon destination type: Opaque data: - access_key: QUtJQVdGV0VLSlBTQU9INVlIRlQK # Encoded AWS Access key - Enter Correct AWS Access Key Encoded with base64 - secret_key: SjZLVDRTSkZIVG9leTQ1M2hadllmMWZpR2pYa0l1UkFmYkhLRHpUdAo= # Encoded AWS Secret Access key - Enter Correct AWS Secret Access Key Encoded with base64 + access_key: QUiNNXXxXxzJKTDIzMzhOIOdidr= # Encoded AWS Access key - Enter Correct AWS Access Key Encoded with base64 + secret_key: RlBTUVdxZUxXxxxXxxXXx2JLSkVxxXxxXXXXxxxXXXxxxXXXazNZVQ== # Encoded AWS Secret Access key - Enter Correct AWS Secret Access Key Encoded with base64 # Refer https://www.baeldung.com/linux/cli-base64-encode-decode this URL to Encode and Decode of String in Base64 \ No newline at end of file diff --git a/_examples/complete/locals.tf b/_examples/complete/locals.tf index e0fb2c9..5e831a8 100644 --- a/_examples/complete/locals.tf +++ b/_examples/complete/locals.tf @@ -1,4 +1,3 @@ - locals { name = "helm-addons" @@ -14,5 +13,5 @@ locals { GithubRepo = "terraform-helm-eks-addons" GithubOrg = "clouddrove" } - cluster_version = "1.26" + cluster_version = "1.29" } \ No newline at end of file diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index b948fc0..a48f3d6 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -176,8 +176,7 @@ module "addons" { prometheus = true prometheus_cloudwatch_exporter = true - - # Grafaa Deployment + # Grafana Deployment grafana = true grafana_helm_config = { values = [file("./config/grafana/override-grafana.yaml")] } grafana_manifests = 
var.grafana_manifests @@ -191,33 +190,33 @@ module "addons" { external_secrets = true # -- Path of override-values.yaml file - metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } - cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } - karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } - aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } - aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } - aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } - aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } - calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } - istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } - kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } - external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } - ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } - kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } - fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } - velero_helm_config = { values = [file("./config/override-velero.yaml")] } - new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } - kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } - keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } - certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } - filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } - 
reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } - external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } - redis_helm_config = { values = [file("./config/override-redis.yaml")] } - actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } - prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } - prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } - prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] + metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } + cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } + karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } + aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } + aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } + aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } + aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } + calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } + istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } + kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } + external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } + ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } + kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } + 
fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } + velero_helm_config = { values = [file("./config/override-velero.yaml")] } + new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } + kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } + keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } + certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } + filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } + reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } + external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } + redis_helm_config = { values = [file("./config/override-redis.yaml")] } + actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } + prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } + prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } + # prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] # Uncomment this to use Secret Based Authentication and Update Secret manifest with real credentials # -- Override Helm Release attributes metrics_server_extra_configs = var.metrics_server_extra_configs diff --git a/_examples/complete/variables.tf b/_examples/complete/variables.tf index b5ae7b6..ca87056 100644 --- a/_examples/complete/variables.tf +++ b/_examples/complete/variables.tf @@ -213,7 +213,7 @@ variable "actions_runner_controller_extra_configs" { variable "prometheus_cloudwatch_exporter_extra_configs" { type = any default = { - role_name = "" + atomic = true } } @@ -221,8 +221,7 @@ variable "prometheus_cloudwatch_exporter_extra_configs" { variable 
"prometheus_extra_configs" { type = any default = { - atomic = true - namespace = "istio-system" + atomic = true } } @@ -239,6 +238,6 @@ variable "grafana_manifests" { grafana_virtualservice_file_path = string }) default = { - grafana_virtualservice_file_path = "./config/grafana/grafana-vs.yaml" + grafana_virtualservice_file_path = "" } } \ No newline at end of file diff --git a/_examples/external-eks/variables.tf b/_examples/external-eks/variables.tf index 49aaac1..b182e03 100644 --- a/_examples/external-eks/variables.tf +++ b/_examples/external-eks/variables.tf @@ -201,8 +201,6 @@ variable "grafana_manifests" { # ---------------------- PROMETHEUS-CLOUDWATCH-EXPORTER ------------------------------------------------ variable "prometheus_cloudwatch_exporter_extra_configs" { - type = any - default = { - role_name = "" - } + type = any + default = {} } \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/README.md b/addons/prometheus-cloudwatch-exporter/README.md index b4c6e89..78a05d4 100644 --- a/addons/prometheus-cloudwatch-exporter/README.md +++ b/addons/prometheus-cloudwatch-exporter/README.md @@ -1,6 +1,6 @@ # Prometheus Cloudwatch Exporter Helm Chart -The CloudWatch Exporter for Prometheus is a tool that allows you to export Amazon CloudWatch metrics in the Prometheus format. Amazon CloudWatch is a monitoring and observability service provided by AWS that provides metrics, logs, and traces from AWS resources and applications +The CloudWatch Exporter for Prometheus is a tool that allows you to export Amazon CloudWatch metrics in the Prometheus format. Amazon CloudWatch is a monitoring and observability service provided by AWS that provides metrics, logs, and traces from AWS resources and applications. 
## Installation Below terraform script describes how to use Prometheus Cloudwatch Exporter Terraform Addon, A complete example is also given [here](https://github.com/clouddrove/terraform-helm-eks-addons/blob/master/_examples/complete/main.tf). @@ -44,34 +44,83 @@ resources: # This config is for AWS Load balancer config: |- # This is the default configuration for prometheus-cloudwatch-exporter - region: eu-west-1 - period_seconds: 240 + region: us-east-1 metrics: - - aws_namespace: AWS/ELB - aws_metric_name: HealthyHostCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: UnHealthyHostCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: RequestCount - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Sum] - - - aws_namespace: AWS/ELB - aws_metric_name: Latency - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Average] - - - aws_namespace: AWS/ELB - aws_metric_name: SurgeQueueLength - aws_dimensions: [AvailabilityZone, LoadBalancerName] - aws_statistics: [Maximum, Sum] - + - aws_dimensions: + - InstanceId + aws_metric_name: CPUUtilization + aws_namespace: AWS/EC2 + aws_statistics: + - Average + aws_tag_select: + resource_type_selection: ec2:instance + resource_id_dimension: InstanceId + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskWriteBytes + aws_namespace: 
AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskReadBytes + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: CPUCreditBalance + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: CPUCreditUsage + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_Instance + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_System + aws_namespace: AWS/EC2 + aws_statistics: + - Sum ``` ## Authentication @@ -80,19 +129,15 @@ config: |- ### Using Secrets - Update Access key and Secret Access keys from the config files provided in the examples. -### Using Role (Default) -- Don't pass secret to use Role based authentication. -- A Role and Policy will be created for authentication with AWS. -- Pass RoleName if you have existing role for the authentication: -```hcl -prometheus_cloudwatch_exporter_extra_configs = { - role_name = "prometheus_cloudwatch_exporter_role" -} -``` -- To override the default policy create `json` format file and pass it like this: -```hcl -prometheus_cloudwatch_exporter_iampolicy_json_content = file("./custom-iam-policies/prometheus-cloudwatch-exporter.json") -``` +### Service Account (Default) +- Don't pass secret to use Service Based authentication. +- Minimal Required Permissions are allowed to the service account for Prometheus Cloudwatch Exporter. 
+ +## Additional Configuration and Use +- Prometheus Cloudwatch Exporter is just a Exporter, that need to be used in prometheus as a exporter to scrape details from Exporter + +### Prometheus Scrape Config +- Checkout [this](https://github.com/clouddrove/terraform-aws-eks-addons/blob/master/_examples/complete/config/override-prometheus.yaml) Prometheus Configuration to add scrape config for Prometheus Cloudwatch Exporter. diff --git a/addons/prometheus-cloudwatch-exporter/locals.tf b/addons/prometheus-cloudwatch-exporter/locals.tf index 023ca21..9b8643d 100644 --- a/addons/prometheus-cloudwatch-exporter/locals.tf +++ b/addons/prometheus-cloudwatch-exporter/locals.tf @@ -38,7 +38,4 @@ locals { local.default_helm_config, var.helm_config ) - - role_name = coalesce(var.prometheus_cloudwatch_exporter_extra_configs.role_name, "${local.name}-${var.eks_cluster_name}-role") - policy_name = "${local.name}-${var.eks_cluster_name}-policy" } diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index 8c1a0e7..f8fc2e2 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -17,7 +17,7 @@ module "prometheus_cloudwatch_exporter_secret" { } module "prometheus_cloudwatch_exporter_role" { - count = var.secret_manifest == [] && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 + count = length(var.secret_manifest) == 0 ? 
1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops @@ -26,16 +26,25 @@ module "prometheus_cloudwatch_exporter_role" { set_values = [ { - name = "aws.role" - value = local.role_name + name = "serviceAccount.create" + value = "false" + }, + { + name = "serviceAccount.name" + value = "${local.name}-sa" } ] - depends_on = [module.prometheus_cloudwatch_exporter_secret] -} -resource "kubernetes_namespace" "prometheus_cloudwatch_exporter_namespace" { - metadata { - name = local.default_helm_config.namespace + + # -- IRSA Configurations + irsa_config = { + irsa_iam_policies = [aws_iam_policy.policy.arn] + irsa_iam_role_name = "${local.name}-${var.eks_cluster_name}" + create_kubernetes_namespace = false + kubernetes_service_account = "${local.name}-sa" + kubernetes_namespace = local.default_helm_config.namespace + eks_oidc_provider_arn = var.addon_context.eks_oidc_provider_arn + account_id = var.addon_context.aws_caller_identity_account_id } } @@ -46,30 +55,14 @@ resource "kubectl_manifest" "secret_manifest" { depends_on = [kubernetes_namespace.prometheus_cloudwatch_exporter_namespace] } -# Role for AWS Authentication -data "aws_iam_policy_document" "role" { - count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 - statement { - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["eks.amazonaws.com"] - } +resource "kubernetes_namespace" "prometheus_cloudwatch_exporter_namespace" { + metadata { + name = local.default_helm_config.namespace } } -resource "aws_iam_role" "role" { - count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 
1 : 0 - name = local.role_name - assume_role_policy = data.aws_iam_policy_document.role[0].json -} - -# Policy of the Role resource "aws_iam_policy" "policy" { - count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 1 : 0 - name = local.policy_name + name = "${local.name}-${var.eks_cluster_name}" path = "/" description = "IAM Policy used by ${local.name}-${var.eks_cluster_name} IAM Role" policy = var.iampolicy_json_content != null ? var.iampolicy_json_content : <<-EOT @@ -77,56 +70,24 @@ resource "aws_iam_policy" "policy" { "Version": "2012-10-17", "Statement": [ { - "Sid": "AllowReadingMetricsFromCloudWatch", + "Sid": "AllowCloudwatch", "Effect": "Allow", "Action": [ - "cloudwatch:DescribeAlarmsForMetric", - "cloudwatch:DescribeAlarmHistory", - "cloudwatch:DescribeAlarms", "cloudwatch:ListMetrics", - "cloudwatch:GetMetricData", - "cloudwatch:GetInsightRuleReport", - "cloudwatch:GetMetricStatistics" + "cloudwatch:GetMetricStatistics", + "cloudwatch:GetMetricData" ], "Resource": "*" }, { - "Sid": "AllowReadingLogsFromCloudWatch", + "Sid": "AllowResourceTagging", "Effect": "Allow", "Action": [ - "logs:DescribeLogGroups", - "logs:GetLogGroupFields", - "logs:StartQuery", - "logs:StopQuery", - "logs:GetQueryResults", - "logs:GetLogEvents" + "tag:GetResources" ], "Resource": "*" - }, - { - "Sid": "AllowReadingTagsInstancesRegionsFromEC2", - "Effect": "Allow", - "Action": [ - "ec2:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions" - ], - "Resource": "*" - }, - { - "Sid": "AllowReadingResourcesForTags", - "Effect": "Allow", - "Action": "tag:GetResources", - "Resource": "*" } ] } EOT -} - -# Policy Attachment with Role -resource "aws_iam_role_policy_attachment" "prometheus_cloudwatch_exporter_policy" { - count = length(var.secret_manifest) == 0 && var.prometheus_cloudwatch_exporter_extra_configs.role_name == "" ? 
1 : 0 - policy_arn = aws_iam_policy.policy[0].arn - role = aws_iam_role.role[0].name } \ No newline at end of file diff --git a/addons/prometheus-cloudwatch-exporter/variables.tf b/addons/prometheus-cloudwatch-exporter/variables.tf index 2eb5937..c0c2246 100644 --- a/addons/prometheus-cloudwatch-exporter/variables.tf +++ b/addons/prometheus-cloudwatch-exporter/variables.tf @@ -28,9 +28,7 @@ variable "addon_context" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = { - role_name = null - } + default = {} } variable "secret_manifest" { diff --git a/addons/prometheus/README.md b/addons/prometheus/README.md index 299a673..969eb50 100644 --- a/addons/prometheus/README.md +++ b/addons/prometheus/README.md @@ -25,6 +25,10 @@ module "addons" { } ``` +## Configuration +- Prometheus is a data scraper that will scrape the config from target and store it in Volume or Storge. +- Prometheus is used for Monitoring and Logging with Grafana servic. Checkout [this](https://github.com/clouddrove/terraform-aws-eks-addons/blob/master/_examples/complete/config/grafana/override-grafana.yaml) Grafana default configuration to add Prometheus URL as a data-source for Grafana dashboard. 
+ ## Requirements diff --git a/variables.tf b/variables.tf index e447481..2911574 100644 --- a/variables.tf +++ b/variables.tf @@ -568,7 +568,6 @@ variable "actions_runner_controller_extra_configs" { default = {} } - #-----------PROMETHEUS--------------------------- variable "prometheus" { description = "Enable prometheus add-on" @@ -633,9 +632,7 @@ variable "prometheus_cloudwatch_exporter_helm_config" { variable "prometheus_cloudwatch_exporter_extra_configs" { description = "Override attributes of helm_release terraform resource" type = any - default = { - role_name = "" - } + default = {} } variable "prometheus_cloudwatch_exporter_secret_manifest" { From 3e5a5fdac9fff3981d277a0604a5e6a8be1b1af4 Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Mon, 5 Feb 2024 16:27:34 +0530 Subject: [PATCH 10/13] fix: added output for exporter addon in root output file --- outputs.tf | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/outputs.tf b/outputs.tf index add080b..c8cb1f9 100644 --- a/outputs.tf +++ b/outputs.tf @@ -410,4 +410,18 @@ output "grafana_chart_version" { output "grafana_repository" { value = module.grafana[*].repository description = "Helm chart repository of the grafana." +} + +#----------- Prometheus Cloudwatch Exporter ------------------------ +output "prometheus_cloudwatch_exporter_namespace" { + value = module.prometheus_cloudwatch_exporter[*].namespace + description = "The namespace where Prometheus Cloudwatch Exporter is deployed." +} +output "prometheus_cloudwatch_exporter_chart_version" { + value = module.prometheus_cloudwatch_exporter[*].chart_version + description = "Chart version of the Prometheus Cloudwatch Exporter Helm Chart." +} +output "prometheus_cloudwatch_exporter_repository" { + value = module.prometheus_cloudwatch_exporter[*].repository + description = "Helm chart repository of the Prometheus Cloudwatch Exporter." 
} \ No newline at end of file From ac3837adf1b64a3bf8ced2d23b83d4d06ab734aa Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Mon, 5 Feb 2024 19:03:51 +0530 Subject: [PATCH 11/13] docs: update readme for addon latest version --- _examples/complete/main.tf | 2 +- addons/keda/README.md | 1 - addons/kiali-server/README.md | 1 - addons/kube-state-metrics/README.md | 1 - addons/metrics-server/README.md | 1 - addons/nri-bundle/README.md | 1 - addons/prometheus-cloudwatch-exporter/README.md | 1 - 7 files changed, 1 insertion(+), 7 deletions(-) diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index a48f3d6..568e969 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -257,7 +257,7 @@ module "addons-internal" { depends_on = [module.eks] eks_cluster_name = module.eks.cluster_name - istio_ingress = false + istio_ingress = true istio_manifests = var.istio_manifests_internal istio_ingress_extra_configs = var.istio_ingress_extra_configs_internal } \ No newline at end of file diff --git a/addons/keda/README.md b/addons/keda/README.md index cbe8b41..d6e31ab 100644 --- a/addons/keda/README.md +++ b/addons/keda/README.md @@ -7,7 +7,6 @@ Below terraform script shows how to use Keda Terraform Addon, A complete example ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.9" depends_on = [module.eks.cluster_id] eks_cluster_name = module.eks.cluster_name diff --git a/addons/kiali-server/README.md b/addons/kiali-server/README.md index 19ce10b..f387032 100644 --- a/addons/kiali-server/README.md +++ b/addons/kiali-server/README.md @@ -5,7 +5,6 @@ Below terraform script shows how to use Kiali-Server Terraform Addon, A complete ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.1" depends_on = [module.eks.cluster_id] eks_cluster_name = module.eks.cluster_name diff --git a/addons/kube-state-metrics/README.md b/addons/kube-state-metrics/README.md index 2f19738..4b87f15 100644 --- 
a/addons/kube-state-metrics/README.md +++ b/addons/kube-state-metrics/README.md @@ -10,7 +10,6 @@ Below terraform script shows how to use External Secrets Terraform Addon, A comp ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.6" depends_on = [module.eks] eks_cluster_name = module.eks.cluster_name diff --git a/addons/metrics-server/README.md b/addons/metrics-server/README.md index 767db5a..c6b25e5 100644 --- a/addons/metrics-server/README.md +++ b/addons/metrics-server/README.md @@ -7,7 +7,6 @@ Below terraform script shows how to use Metrics-Server Terraform Addon, A comple ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.1" depends_on = [module.eks.cluster_id] eks_cluster_name = module.eks.cluster_name diff --git a/addons/nri-bundle/README.md b/addons/nri-bundle/README.md index ebd3ac6..b07d430 100644 --- a/addons/nri-bundle/README.md +++ b/addons/nri-bundle/README.md @@ -7,7 +7,6 @@ Below terraform script shows how to use New-Relic Terraform Addon, A complete ex ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.1" depends_on = [module.eks.cluster_id] eks_cluster_name = module.eks.cluster_name diff --git a/addons/prometheus-cloudwatch-exporter/README.md b/addons/prometheus-cloudwatch-exporter/README.md index 78a05d4..1b53f6d 100644 --- a/addons/prometheus-cloudwatch-exporter/README.md +++ b/addons/prometheus-cloudwatch-exporter/README.md @@ -7,7 +7,6 @@ Below terraform script describes how to use Prometheus Cloudwatch Exporter Terra ```hcl module "addons" { source = "clouddrove/eks-addons/aws" - version = "0.0.9" depends_on = [module.eks.cluster_id] eks_cluster_name = module.eks.cluster_name From 7bf953fcb3a7eded9e1aa42e0abe8b6b888bd4ce Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Mon, 5 Feb 2024 20:08:29 +0530 Subject: [PATCH 12/13] fix: fix condition in secret manifest file to prevent multiple secret creation --- _examples/complete/main.tf | 54 +- 
_examples/complete/plan.txt | 2054 +++++++++++++++++ _examples/external-eks/main.tf | 2 +- addons/prometheus-cloudwatch-exporter/main.tf | 10 +- .../variables.tf | 4 +- variables.tf | 6 +- 6 files changed, 2091 insertions(+), 39 deletions(-) create mode 100644 _examples/complete/plan.txt diff --git a/_examples/complete/main.tf b/_examples/complete/main.tf index 568e969..2dafb59 100644 --- a/_examples/complete/main.tf +++ b/_examples/complete/main.tf @@ -190,33 +190,33 @@ module "addons" { external_secrets = true # -- Path of override-values.yaml file - metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } - cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } - karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } - aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } - aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } - aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } - aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } - calico_tigera_helm_config = { values = [file("./config/calico-tigera-values.yaml")] } - istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } - kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } - external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } - ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } - kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } - fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } - velero_helm_config = { values = [file("./config/override-velero.yaml")] } - new_relic_helm_config = 
{ values = [file("./config/override-new-relic.yaml")] } - kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } - keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } - certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } - filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } - reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } - external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } - redis_helm_config = { values = [file("./config/override-redis.yaml")] } - actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } - prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } - prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } - # prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] # Uncomment this to use Secret Based Authentication and Update Secret manifest with real credentials + metrics_server_helm_config = { values = [file("./config/override-metrics-server.yaml")] } + cluster_autoscaler_helm_config = { values = [file("./config/override-cluster-autoscaler.yaml")] } + karpenter_helm_config = { values = [file("./config/override-karpenter.yaml")] } + aws_load_balancer_controller_helm_config = { values = [file("./config/override-aws-load-balancer-controller.yaml")] } + aws_node_termination_handler_helm_config = { values = [file("./config/override-aws-node-termination-handler.yaml")] } + aws_efs_csi_driver_helm_config = { values = [file("./config/override-aws-efs-csi-driver.yaml")] } + aws_ebs_csi_driver_helm_config = { values = [file("./config/override-aws-ebs-csi-driver.yaml")] } + calico_tigera_helm_config = { values = 
[file("./config/calico-tigera-values.yaml")] } + istio_ingress_helm_config = { values = [file("./config/istio/override-values.yaml")] } + kiali_server_helm_config = { values = [file("./config/kiali/override-values.yaml")] } + external_secrets_helm_config = { values = [file("./config/external-secret/override-values.yaml")] } + ingress_nginx_helm_config = { values = [file("./config/override-ingress-nginx.yaml")] } + kubeclarity_helm_config = { values = [file("./config/override-kubeclarity.yaml")] } + fluent_bit_helm_config = { values = [file("./config/override-fluent-bit.yaml")] } + velero_helm_config = { values = [file("./config/override-velero.yaml")] } + new_relic_helm_config = { values = [file("./config/override-new-relic.yaml")] } + kube_state_metrics_helm_config = { values = [file("./config/override-kube-state-matrics.yaml")] } + keda_helm_config = { values = [file("./config/keda/override-keda.yaml")] } + certification_manager_helm_config = { values = [file("./config/override-certification-manager.yaml")] } + filebeat_helm_config = { values = [file("./config/override-filebeat.yaml")] } + reloader_helm_config = { values = [file("./config/reloader/override-reloader.yaml")] } + external_dns_helm_config = { values = [file("./config/override-external-dns.yaml")] } + redis_helm_config = { values = [file("./config/override-redis.yaml")] } + actions_runner_controller_helm_config = { values = [file("./config/override-actions-runner-controller.yaml")] } + prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } + prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } + prometheus_cloudwatch_exporter_secret_manifest = file("./config/prometheus-cloudwatch-exporter/secret.yaml") # Uncomment this to use Secret Based Authentication and Update Secret manifest with real credentials # -- Override Helm Release attributes 
metrics_server_extra_configs = var.metrics_server_extra_configs diff --git a/_examples/complete/plan.txt b/_examples/complete/plan.txt new file mode 100644 index 0000000..b0944f2 --- /dev/null +++ b/_examples/complete/plan.txt @@ -0,0 +1,2054 @@ +module.vpc_cni_irsa.data.aws_region.current: Reading... +data.aws_availability_zones.available: Reading... +module.eks.data.aws_caller_identity.current: Reading... +module.vpc_cni_irsa.data.aws_partition.current: Reading... +module.vpc_cni_irsa.data.aws_caller_identity.current: Reading... +module.eks.data.aws_partition.current: Reading... +module.eks.module.kms.data.aws_caller_identity.current: Reading... +module.vpc_cni_irsa.data.aws_partition.current: Read complete after 0s [id=aws] +module.vpc_cni_irsa.data.aws_region.current: Read complete after 0s [id=us-east-1] +module.eks.data.aws_partition.current: Read complete after 0s [id=aws] +module.eks.module.kms.data.aws_partition.current: Reading... +module.eks.module.eks_managed_node_group["critical"].data.aws_caller_identity.current: Reading... +module.eks.module.eks_managed_node_group["application"].data.aws_caller_identity.current: Reading... +module.eks.module.kms.data.aws_partition.current: Read complete after 0s [id=aws] +module.eks.module.eks_managed_node_group["application"].data.aws_partition.current: Reading... +module.eks.module.eks_managed_node_group["critical"].data.aws_partition.current: Reading... +module.eks.module.eks_managed_node_group["application"].data.aws_partition.current: Read complete after 0s [id=aws] +module.eks.module.eks_managed_node_group["critical"].data.aws_partition.current: Read complete after 0s [id=aws] +module.vpc_cni_irsa.data.aws_iam_policy_document.vpc_cni[0]: Reading... +module.eks.data.aws_iam_policy_document.assume_role_policy[0]: Reading... 
+module.vpc_cni_irsa.data.aws_iam_policy_document.vpc_cni[0]: Read complete after 0s [id=572553129] +module.eks.data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1530481229] +module.eks.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] +module.eks.data.aws_iam_session_context.current: Reading... +module.eks.module.kms.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] +module.vpc_cni_irsa.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] +module.eks.module.eks_managed_node_group["application"].data.aws_caller_identity.current: Read complete after 1s [id=924144197303] +module.eks.module.eks_managed_node_group["critical"].data.aws_caller_identity.current: Read complete after 1s [id=924144197303] +data.aws_availability_zones.available: Read complete after 1s [id=us-east-1] +module.eks.data.aws_iam_session_context.current: Read complete after 1s [id=arn:aws:sts::924144197303:assumed-role/AWSReservedSSO_AdministratorAccess_3b5b668e6e5741c8/nilesh.gadgi@clouddrove.com] +module.eks.module.eks_managed_node_group["critical"].data.aws_iam_policy_document.assume_role_policy[0]: Reading... +module.eks.module.eks_managed_node_group["application"].data.aws_iam_policy_document.assume_role_policy[0]: Reading... +module.eks.module.eks_managed_node_group["critical"].data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1734879000] +module.eks.module.eks_managed_node_group["application"].data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1734879000] + +Terraform used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: + + create + <= read (data resources) + +Terraform will perform the following actions: + + # data.aws_eks_cluster.eks_cluster will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_eks_cluster" "eks_cluster" { + + access_config = (known after apply) + + arn = (known after apply) + + certificate_authority = (known after apply) + + cluster_id = (known after apply) + + created_at = (known after apply) + + enabled_cluster_log_types = (known after apply) + + endpoint = (known after apply) + + id = (known after apply) + + identity = (known after apply) + + kubernetes_network_config = (known after apply) + + name = "helm-addons-cluster" + + outpost_config = (known after apply) + + platform_version = (known after apply) + + role_arn = (known after apply) + + status = (known after apply) + + tags = (known after apply) + + version = (known after apply) + + vpc_config = (known after apply) + } + + # data.aws_eks_cluster_auth.eks_cluster will be read during apply + # (config refers to values not yet known) + <= data "aws_eks_cluster_auth" "eks_cluster" { + + id = (known after apply) + + name = (known after apply) + + token = (sensitive value) + } + + # aws_iam_policy.node_additional will be created + + resource "aws_iam_policy" "node_additional" { + + arn = (known after apply) + + description = "Example usage of node additional policy" + + id = (known after apply) + + name = "helm-addons-additional" + + name_prefix = (known after apply) + + path = "/" + + policy = jsonencode( + { + + Statement = [ + + { + + Action = [ + + "ec2:Describe*", + ] + + Effect = "Allow" + + Resource = "*" + }, + ] + + Version = "2012-10-17" + } + ) + + policy_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = 
"clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.addons.data.aws_caller_identity.current will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_caller_identity" "current" { + + account_id = (known after apply) + + arn = (known after apply) + + id = (known after apply) + + user_id = (known after apply) + } + + # module.addons.data.aws_eks_cluster.eks_cluster will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_eks_cluster" "eks_cluster" { + + access_config = (known after apply) + + arn = (known after apply) + + certificate_authority = (known after apply) + + cluster_id = (known after apply) + + created_at = (known after apply) + + enabled_cluster_log_types = (known after apply) + + endpoint = (known after apply) + + id = (known after apply) + + identity = (known after apply) + + kubernetes_network_config = (known after apply) + + name = "helm-addons-cluster" + + outpost_config = (known after apply) + + platform_version = (known after apply) + + role_arn = (known after apply) + + status = (known after apply) + + tags = (known after apply) + + version = (known after apply) + + vpc_config = (known after apply) + } + + # module.addons.data.aws_partition.current will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_partition" "current" { + + dns_suffix = (known after apply) + + id = (known after apply) + + partition = (known after apply) + + reverse_dns_prefix = (known after apply) + } + + # module.addons.data.aws_region.current will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_region" "current" { + + description = (known after apply) + + endpoint = (known after apply) + + id = (known after apply) + + name = (known after apply) + } + + # module.addons.time_sleep.dataplane will be created + + resource "time_sleep" 
"dataplane" { + + create_duration = "10s" + + id = (known after apply) + + triggers = { + + "data_plane_wait_arn" = "" + + "eks_cluster_id" = (known after apply) + } + } + + # module.eks.data.aws_eks_addon_version.this["vpc-cni"] will be read during apply + # (depends on a resource or a module with changes pending) + <= data "aws_eks_addon_version" "this" { + + addon_name = "vpc-cni" + + id = (known after apply) + + kubernetes_version = "1.29" + + most_recent = true + + version = (known after apply) + } + + # module.eks.data.tls_certificate.this[0] will be read during apply + # (config refers to values not yet known) + <= data "tls_certificate" "this" { + + certificates = (known after apply) + + id = (known after apply) + + url = (known after apply) + } + + # module.eks.aws_cloudwatch_log_group.this[0] will be created + + resource "aws_cloudwatch_log_group" "this" { + + arn = (known after apply) + + id = (known after apply) + + log_group_class = (known after apply) + + name = "/aws/eks/helm-addons-cluster/cluster" + + name_prefix = (known after apply) + + retention_in_days = 90 + + skip_destroy = false + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "/aws/eks/helm-addons-cluster/cluster" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "/aws/eks/helm-addons-cluster/cluster" + } + } + + # module.eks.aws_ec2_tag.cluster_primary_security_group["Environment"] will be created + + resource "aws_ec2_tag" "cluster_primary_security_group" { + + id = (known after apply) + + key = "Environment" + + resource_id = (known after apply) + + value = "test" + } + + # module.eks.aws_ec2_tag.cluster_primary_security_group["GithubOrg"] will be created + + resource "aws_ec2_tag" "cluster_primary_security_group" { + + id = (known after apply) + + key = "GithubOrg" + + resource_id = (known after apply) + + value = 
"clouddrove" + } + + # module.eks.aws_ec2_tag.cluster_primary_security_group["GithubRepo"] will be created + + resource "aws_ec2_tag" "cluster_primary_security_group" { + + id = (known after apply) + + key = "GithubRepo" + + resource_id = (known after apply) + + value = "terraform-helm-eks-addons" + } + + # module.eks.aws_eks_addon.before_compute["vpc-cni"] will be created + + resource "aws_eks_addon" "before_compute" { + + addon_name = "vpc-cni" + + addon_version = (known after apply) + + arn = (known after apply) + + cluster_name = "helm-addons-cluster" + + configuration_values = jsonencode( + { + + env = { + + ENABLE_PREFIX_DELEGATION = "true" + + WARM_PREFIX_TARGET = "1" + } + } + ) + + created_at = (known after apply) + + id = (known after apply) + + modified_at = (known after apply) + + resolve_conflicts = "OVERWRITE" + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + + timeouts {} + } + + # module.eks.aws_eks_cluster.this[0] will be created + + resource "aws_eks_cluster" "this" { + + arn = (known after apply) + + certificate_authority = (known after apply) + + cluster_id = (known after apply) + + created_at = (known after apply) + + enabled_cluster_log_types = [ + + "api", + + "audit", + + "authenticator", + ] + + endpoint = (known after apply) + + id = (known after apply) + + identity = (known after apply) + + name = "helm-addons-cluster" + + platform_version = (known after apply) + + role_arn = (known after apply) + + status = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = 
"terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + version = "1.29" + + + encryption_config { + + resources = [ + + "secrets", + ] + + + provider { + + key_arn = (known after apply) + } + } + + + kubernetes_network_config { + + ip_family = "ipv4" + + service_ipv4_cidr = (known after apply) + + service_ipv6_cidr = (known after apply) + } + + + timeouts {} + + + vpc_config { + + cluster_security_group_id = (known after apply) + + endpoint_private_access = true + + endpoint_public_access = true + + public_access_cidrs = [ + + "0.0.0.0/0", + ] + + security_group_ids = (known after apply) + + subnet_ids = (known after apply) + + vpc_id = (known after apply) + } + } + + # module.eks.aws_iam_openid_connect_provider.oidc_provider[0] will be created + + resource "aws_iam_openid_connect_provider" "oidc_provider" { + + arn = (known after apply) + + client_id_list = [ + + "sts.amazonaws.com", + ] + + id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + thumbprint_list = (known after apply) + + url = (known after apply) + } + + # module.eks.aws_iam_policy.cluster_encryption[0] will be created + + resource "aws_iam_policy" "cluster_encryption" { + + arn = (known after apply) + + description = "Cluster encryption policy to allow cluster role to utilize CMK provided" + + id = (known after apply) + + name = (known after apply) + + name_prefix = "helm-addons-cluster-cluster-ClusterEncryption" + + path = "/" + + policy = (known after apply) + + policy_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + 
"GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.eks.aws_iam_role.this[0] will be created + + resource "aws_iam_role" "this" { + + arn = (known after apply) + + assume_role_policy = jsonencode( + { + + Statement = [ + + { + + Action = "sts:AssumeRole" + + Effect = "Allow" + + Principal = { + + Service = "eks.amazonaws.com" + } + + Sid = "EKSClusterAssumeRole" + }, + ] + + Version = "2012-10-17" + } + ) + + create_date = (known after apply) + + force_detach_policies = true + + id = (known after apply) + + managed_policy_arns = (known after apply) + + max_session_duration = 3600 + + name = (known after apply) + + name_prefix = "helm-addons-cluster-cluster-" + + path = "/" + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + unique_id = (known after apply) + + + inline_policy { + + name = "helm-addons-cluster-cluster" + + policy = jsonencode( + { + + Statement = [ + + { + + Action = [ + + "logs:CreateLogGroup", + ] + + Effect = "Deny" + + Resource = "*" + }, + ] + + Version = "2012-10-17" + } + ) + } + } + + # module.eks.aws_iam_role_policy_attachment.cluster_encryption[0] will be created + + resource "aws_iam_role_policy_attachment" "cluster_encryption" { + + id = (known after apply) + + policy_arn = (known after apply) + + role = (known after apply) + } + + # module.eks.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" + + role = (known after apply) + } + + # module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"] will be created + + resource "aws_iam_role_policy_attachment" 
"this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController" + + role = (known after apply) + } + + # module.eks.aws_security_group.cluster[0] will be created + + resource "aws_security_group" "cluster" { + + arn = (known after apply) + + description = "EKS cluster security group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "helm-addons-cluster-cluster-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons-cluster-cluster" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons-cluster-cluster" + } + + vpc_id = (known after apply) + } + + # module.eks.aws_security_group.node[0] will be created + + resource "aws_security_group" "node" { + + arn = (known after apply) + + description = "EKS node shared security group" + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = "helm-addons-cluster-node-" + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons-cluster-node" + + "kubernetes.io/cluster/helm-addons-cluster" = "owned" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons-cluster-node" + + "kubernetes.io/cluster/helm-addons-cluster" = "owned" + } + + vpc_id = (known after apply) + } + + # module.eks.aws_security_group_rule.cluster["ingress_nodes_443"] will be created + + resource "aws_security_group_rule" "cluster" { + + 
description = "Node groups to cluster API" + + from_port = 443 + + id = (known after apply) + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["egress_all"] will be created + + resource "aws_security_group_rule" "node" { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "Allow all egress" + + from_port = 0 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "-1" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 0 + + type = "egress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_443"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node groups" + + from_port = 443 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_4443_webhook"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node 4443/tcp webhook" + + from_port = 4443 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 4443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_6443_webhook"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node 6443/tcp webhook" + + 
from_port = 6443 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 6443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_8443_webhook"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node 8443/tcp webhook" + + from_port = 8443 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 8443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_9443_webhook"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node 9443/tcp webhook" + + from_port = 9443 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 9443 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_cluster_kubelet"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Cluster API to node kubelets" + + from_port = 10250 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = false + + source_security_group_id = (known after apply) + + to_port = 10250 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_nodes_ephemeral"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Node to node ingress on ephemeral ports" + + from_port = 
1025 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = true + + source_security_group_id = (known after apply) + + to_port = 65535 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_self_coredns_tcp"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Node to node CoreDNS" + + from_port = 53 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "tcp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = true + + source_security_group_id = (known after apply) + + to_port = 53 + + type = "ingress" + } + + # module.eks.aws_security_group_rule.node["ingress_self_coredns_udp"] will be created + + resource "aws_security_group_rule" "node" { + + description = "Node to node CoreDNS UDP" + + from_port = 53 + + id = (known after apply) + + prefix_list_ids = [] + + protocol = "udp" + + security_group_id = (known after apply) + + security_group_rule_id = (known after apply) + + self = true + + source_security_group_id = (known after apply) + + to_port = 53 + + type = "ingress" + } + + # module.eks.time_sleep.this[0] will be created + + resource "time_sleep" "this" { + + create_duration = "30s" + + id = (known after apply) + + triggers = { + + "cluster_certificate_authority_data" = (known after apply) + + "cluster_endpoint" = (known after apply) + + "cluster_name" = "helm-addons-cluster" + + "cluster_version" = "1.29" + } + } + + # module.vpc.aws_default_network_acl.this[0] will be created + + resource "aws_default_network_acl" "this" { + + arn = (known after apply) + + default_network_acl_id = (known after apply) + + id = (known after apply) + + owner_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + 
tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + + + egress { + + action = "allow" + + from_port = 0 + + ipv6_cidr_block = "::/0" + + protocol = "-1" + + rule_no = 101 + + to_port = 0 + } + + egress { + + action = "allow" + + cidr_block = "0.0.0.0/0" + + from_port = 0 + + protocol = "-1" + + rule_no = 100 + + to_port = 0 + } + + + ingress { + + action = "allow" + + from_port = 0 + + ipv6_cidr_block = "::/0" + + protocol = "-1" + + rule_no = 101 + + to_port = 0 + } + + ingress { + + action = "allow" + + cidr_block = "0.0.0.0/0" + + from_port = 0 + + protocol = "-1" + + rule_no = 100 + + to_port = 0 + } + } + + # module.vpc.aws_default_route_table.default[0] will be created + + resource "aws_default_route_table" "default" { + + arn = (known after apply) + + default_route_table_id = (known after apply) + + id = (known after apply) + + owner_id = (known after apply) + + route = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + + + timeouts { + + create = "5m" + + update = "5m" + } + } + + # module.vpc.aws_default_security_group.this[0] will be created + + resource "aws_default_security_group" "this" { + + arn = (known after apply) + + description = (known after apply) + + egress = (known after apply) + + id = (known after apply) + + ingress = (known after apply) + + name = (known after apply) + + name_prefix = (known after apply) + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = 
"helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_eip.nat[0] will be created + + resource "aws_eip" "nat" { + + allocation_id = (known after apply) + + association_id = (known after apply) + + carrier_ip = (known after apply) + + customer_owned_ip = (known after apply) + + domain = "vpc" + + id = (known after apply) + + instance = (known after apply) + + network_border_group = (known after apply) + + network_interface = (known after apply) + + private_dns = (known after apply) + + private_ip = (known after apply) + + public_dns = (known after apply) + + public_ip = (known after apply) + + public_ipv4_pool = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc = (known after apply) + } + + # module.vpc.aws_internet_gateway.this[0] will be created + + resource "aws_internet_gateway" "this" { + + arn = (known after apply) + + id = (known after apply) + + owner_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_nat_gateway.this[0] will be created + + resource "aws_nat_gateway" "this" { + + allocation_id = (known after apply) + + association_id = (known after apply) + + connectivity_type = "public" + + id = (known after apply) + + network_interface_id = (known after apply) + + private_ip = 
(known after apply) + + public_ip = (known after apply) + + secondary_private_ip_address_count = (known after apply) + + secondary_private_ip_addresses = (known after apply) + + subnet_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.vpc.aws_route.private_nat_gateway[0] will be created + + resource "aws_route" "private_nat_gateway" { + + destination_cidr_block = "0.0.0.0/0" + + id = (known after apply) + + instance_id = (known after apply) + + instance_owner_id = (known after apply) + + nat_gateway_id = (known after apply) + + network_interface_id = (known after apply) + + origin = (known after apply) + + route_table_id = (known after apply) + + state = (known after apply) + + + timeouts { + + create = "5m" + } + } + + # module.vpc.aws_route.public_internet_gateway[0] will be created + + resource "aws_route" "public_internet_gateway" { + + destination_cidr_block = "0.0.0.0/0" + + gateway_id = (known after apply) + + id = (known after apply) + + instance_id = (known after apply) + + instance_owner_id = (known after apply) + + network_interface_id = (known after apply) + + origin = (known after apply) + + route_table_id = (known after apply) + + state = (known after apply) + + + timeouts { + + create = "5m" + } + } + + # module.vpc.aws_route_table.private[0] will be created + + resource "aws_route_table" "private" { + + arn = (known after apply) + + id = (known after apply) + + owner_id = (known after apply) + + propagating_vgws = (known after apply) + + route = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + 
"GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_route_table.public[0] will be created + + resource "aws_route_table" "public" { + + arn = (known after apply) + + id = (known after apply) + + owner_id = (known after apply) + + propagating_vgws = (known after apply) + + route = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_route_table_association.database[0] will be created + + resource "aws_route_table_association" "database" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.database[1] will be created + + resource "aws_route_table_association" "database" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.database[2] will be created + + resource "aws_route_table_association" "database" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.private[0] will be created + + resource "aws_route_table_association" "private" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.private[1] will be created + + resource "aws_route_table_association" "private" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.private[2] 
will be created + + resource "aws_route_table_association" "private" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.public[0] will be created + + resource "aws_route_table_association" "public" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.public[1] will be created + + resource "aws_route_table_association" "public" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_route_table_association.public[2] will be created + + resource "aws_route_table_association" "public" { + + id = (known after apply) + + route_table_id = (known after apply) + + subnet_id = (known after apply) + } + + # module.vpc.aws_subnet.database[0] will be created + + resource "aws_subnet" "database" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1a" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.8.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.database[1] will be created + + resource "aws_subnet" "database" { + + arn = 
(known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1b" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.9.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.database[2] will be created + + resource "aws_subnet" "database" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1c" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.10.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.private[0] will be created + + resource "aws_subnet" "private" 
{ + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1a" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.0.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.private[1] will be created + + resource "aws_subnet" "private" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1b" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.1.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = 
"helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.private[2] will be created + + resource "aws_subnet" "private" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1c" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.2.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/internal-elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.public[0] will be created + + resource "aws_subnet" "public" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1a" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.4.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" 
+ + "kubernetes.io/role/elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.public[1] will be created + + resource "aws_subnet" "public" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1b" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.5.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_subnet.public[2] will be created + + resource "aws_subnet" "public" { + + arn = (known after apply) + + assign_ipv6_address_on_creation = false + + availability_zone = "us-east-1c" + + availability_zone_id = (known after apply) + + cidr_block = "10.0.6.0/24" + + enable_dns64 = false + + enable_resource_name_dns_a_record_on_launch = false + + enable_resource_name_dns_aaaa_record_on_launch = false + + id = (known after apply) + + ipv6_cidr_block_association_id = (known after apply) + + ipv6_native = false + + map_public_ip_on_launch = false + + owner_id = (known after apply) + + private_dns_hostname_type_on_launch = (known after apply) 
+ + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/elb" = "1" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "kubernetes.io/role/elb" = "1" + } + + vpc_id = (known after apply) + } + + # module.vpc.aws_vpc.this[0] will be created + + resource "aws_vpc" "this" { + + arn = (known after apply) + + cidr_block = "10.0.0.0/16" + + default_network_acl_id = (known after apply) + + default_route_table_id = (known after apply) + + default_security_group_id = (known after apply) + + dhcp_options_id = (known after apply) + + enable_dns_hostnames = true + + enable_dns_support = true + + enable_network_address_usage_metrics = (known after apply) + + id = (known after apply) + + instance_tenancy = "default" + + ipv6_association_id = (known after apply) + + ipv6_cidr_block = (known after apply) + + ipv6_cidr_block_network_border_group = (known after apply) + + main_route_table_id = (known after apply) + + owner_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.vpc_cni_irsa.data.aws_iam_policy_document.this[0] will be read during apply + # (config refers to values not yet known) + <= data "aws_iam_policy_document" "this" { + + id = (known after apply) + + json = (known after apply) + + + statement { + + actions = [ + + "sts:AssumeRoleWithWebIdentity", + ] + + effect = "Allow" + + + condition { + + test = "StringEquals" + + values = [ + + "sts.amazonaws.com", + ] + + variable = (known after apply) + } + + condition { + + test = "StringEquals" + + values = [ + + 
"system:serviceaccount:kube-system:aws-node", + ] + + variable = (known after apply) + } + + + principals { + + identifiers = [ + + (known after apply), + ] + + type = "Federated" + } + } + } + + # module.vpc_cni_irsa.aws_iam_policy.vpc_cni[0] will be created + + resource "aws_iam_policy" "vpc_cni" { + + arn = (known after apply) + + description = "Provides the Amazon VPC CNI Plugin (amazon-vpc-cni-k8s) the permissions it requires to modify the IPv4/IPv6 address configuration on your EKS worker nodes" + + id = (known after apply) + + name = (known after apply) + + name_prefix = "AmazonEKS_CNI_Policy-" + + path = "/" + + policy = jsonencode( + { + + Statement = [ + + { + + Action = [ + + "ec2:DescribeTags", + + "ec2:DescribeNetworkInterfaces", + + "ec2:DescribeInstances", + + "ec2:DescribeInstanceTypes", + + "ec2:AssignIpv6Addresses", + ] + + Effect = "Allow" + + Resource = "*" + + Sid = "IPV6" + }, + + { + + Action = "ec2:CreateTags" + + Effect = "Allow" + + Resource = "arn:aws:ec2:*:*:network-interface/*" + + Sid = "CreateTags" + }, + ] + + Version = "2012-10-17" + } + ) + + policy_id = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.vpc_cni_irsa.aws_iam_role.this[0] will be created + + resource "aws_iam_role" "this" { + + arn = (known after apply) + + assume_role_policy = (known after apply) + + create_date = (known after apply) + + force_detach_policies = true + + id = (known after apply) + + managed_policy_arns = (known after apply) + + max_session_duration = 3600 + + name = (known after apply) + + name_prefix = "VPC-CNI-IRSA" + + path = "/" + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = 
"helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + unique_id = (known after apply) + } + + # module.vpc_cni_irsa.aws_iam_role_policy_attachment.vpc_cni[0] will be created + + resource "aws_iam_role_policy_attachment" "vpc_cni" { + + id = (known after apply) + + policy_arn = (known after apply) + + role = (known after apply) + } + + # module.addons.module.prometheus_cloudwatch_exporter[0].aws_iam_policy.policy will be created + + resource "aws_iam_policy" "policy" { + + arn = (known after apply) + + description = "IAM Policy used by prometheus-cloudwatch-exporter-helm-addons-cluster IAM Role" + + id = (known after apply) + + name = "prometheus-cloudwatch-exporter-helm-addons-cluster" + + name_prefix = (known after apply) + + path = "/" + + policy = jsonencode( + { + + Statement = [ + + { + + Action = [ + + "cloudwatch:ListMetrics", + + "cloudwatch:GetMetricStatistics", + + "cloudwatch:GetMetricData", + ] + + Effect = "Allow" + + Resource = "*" + + Sid = "AllowCloudwatch" + }, + + { + + Action = [ + + "tag:GetResources", + ] + + Effect = "Allow" + + Resource = "*" + + Sid = "AllowResourceTagging" + }, + ] + + Version = "2012-10-17" + } + ) + + policy_id = (known after apply) + + tags_all = (known after apply) + } + + # module.addons.module.prometheus_cloudwatch_exporter[0].kubectl_manifest.secret_manifest[0] will be created + + resource "kubectl_manifest" "secret_manifest" { + + api_version = "v1" + + apply_only = false + + force_conflicts = false + + force_new = false + + id = (known after apply) + + kind = "Secret" + + live_manifest_incluster = (sensitive value) + + live_uid = (known after apply) + + name = "nilesh" + + namespace = "monitoring" + + server_side_apply = false + + uid = (known after apply) + + validate_schema = true + + wait_for_rollout = true + + yaml_body = (sensitive value) + + yaml_body_parsed = <<-EOT + apiVersion: v1 + 
data: (sensitive value) + kind: Secret + metadata: + name: nilesh + namespace: monitoring + type: Opaque + EOT + + yaml_incluster = (sensitive value) + } + + # module.addons.module.prometheus_cloudwatch_exporter[0].kubernetes_namespace.prometheus_cloudwatch_exporter_namespace will be created + + resource "kubernetes_namespace" "prometheus_cloudwatch_exporter_namespace" { + + id = (known after apply) + + wait_for_default_service_account = false + + + metadata { + + generation = (known after apply) + + name = "monitoring" + + resource_version = (known after apply) + + uid = (known after apply) + } + } + + # module.eks.module.eks_managed_node_group["application"].aws_eks_node_group.this[0] will be created + + resource "aws_eks_node_group" "this" { + + ami_type = "AL2_x86_64" + + arn = (known after apply) + + capacity_type = "SPOT" + + cluster_name = "helm-addons-cluster" + + disk_size = 20 + + id = (known after apply) + + instance_types = [ + + "t3.medium", + ] + + node_group_name = "application" + + node_group_name_prefix = (known after apply) + + node_role_arn = (known after apply) + + release_version = (known after apply) + + resources = (known after apply) + + status = (known after apply) + + subnet_ids = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "application" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "application" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + version = "1.29" + + + scaling_config { + + desired_size = 0 + + max_size = 1 + + min_size = 0 + } + + + timeouts {} + + + update_config { + + max_unavailable_percentage = 33 + } + } 
+ + # module.eks.module.eks_managed_node_group["application"].aws_iam_role.this[0] will be created + + resource "aws_iam_role" "this" { + + arn = (known after apply) + + assume_role_policy = jsonencode( + { + + Statement = [ + + { + + Action = "sts:AssumeRole" + + Effect = "Allow" + + Principal = { + + Service = "ec2.amazonaws.com" + } + + Sid = "EKSNodeAssumeRole" + }, + ] + + Version = "2012-10-17" + } + ) + + create_date = (known after apply) + + description = "EKS managed node group IAM role" + + force_detach_policies = true + + id = (known after apply) + + managed_policy_arns = (known after apply) + + max_session_duration = 3600 + + name = (known after apply) + + name_prefix = "application-eks-node-group-" + + path = "/" + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + unique_id = (known after apply) + } + + # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.additional["policy_arn"] will be created + + resource "aws_iam_role_policy_attachment" "additional" { + + id = (known after apply) + + policy_arn = (known after apply) + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + + role = (known after 
apply) + } + + # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["critical"].aws_eks_node_group.this[0] will be created + + resource "aws_eks_node_group" "this" { + + ami_type = "AL2_x86_64" + + arn = (known after apply) + + capacity_type = "ON_DEMAND" + + cluster_name = "helm-addons-cluster" + + disk_size = 20 + + id = (known after apply) + + instance_types = [ + + "t3.medium", + ] + + node_group_name = "critical" + + node_group_name_prefix = (known after apply) + + node_role_arn = (known after apply) + + release_version = (known after apply) + + resources = (known after apply) + + status = (known after apply) + + subnet_ids = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "critical" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "critical" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + version = "1.29" + + + scaling_config { + + desired_size = 1 + + max_size = 2 + + 
min_size = 1 + } + + + timeouts {} + + + update_config { + + max_unavailable_percentage = 33 + } + } + + # module.eks.module.eks_managed_node_group["critical"].aws_iam_role.this[0] will be created + + resource "aws_iam_role" "this" { + + arn = (known after apply) + + assume_role_policy = jsonencode( + { + + Statement = [ + + { + + Action = "sts:AssumeRole" + + Effect = "Allow" + + Principal = { + + Service = "ec2.amazonaws.com" + } + + Sid = "EKSNodeAssumeRole" + }, + ] + + Version = "2012-10-17" + } + ) + + create_date = (known after apply) + + description = "EKS managed node group IAM role" + + force_detach_policies = true + + id = (known after apply) + + managed_policy_arns = (known after apply) + + max_session_duration = 3600 + + name = (known after apply) + + name_prefix = "critical-eks-node-group-" + + path = "/" + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" + + "kubernetes.io/cluster/helm-addons-cluster" = "shared" + } + + unique_id = (known after apply) + } + + # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.additional["policy_arn"] will be created + + resource "aws_iam_role_policy_attachment" "additional" { + + id = (known after apply) + + policy_arn = (known after apply) + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + 
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + + role = (known after apply) + } + + # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] will be created + + resource "aws_iam_role_policy_attachment" "this" { + + id = (known after apply) + + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + + role = (known after apply) + } + + # module.eks.module.kms.data.aws_iam_policy_document.this[0] will be read during apply + # (config refers to values not yet known) + <= data "aws_iam_policy_document" "this" { + + id = (known after apply) + + json = (known after apply) + + override_policy_documents = [] + + source_policy_documents = [] + + + statement { + + actions = [ + + "kms:CancelKeyDeletion", + + "kms:Create*", + + "kms:Delete*", + + "kms:Describe*", + + "kms:Disable*", + + "kms:Enable*", + + "kms:Get*", + + "kms:List*", + + "kms:Put*", + + "kms:Revoke*", + + "kms:ScheduleKeyDeletion", + + "kms:TagResource", + + "kms:UntagResource", + + "kms:Update*", + ] + + resources = [ + + "*", + ] + + sid = "KeyAdministration" + + + principals { + + identifiers = [ + + "arn:aws:iam::924144197303:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_AdministratorAccess_3b5b668e6e5741c8", + ] + + type = "AWS" + } + } + + statement { + + actions = [ + + "kms:Decrypt", + + "kms:DescribeKey", + + "kms:Encrypt", + + "kms:GenerateDataKey*", + + "kms:ReEncrypt*", + ] + + resources = [ + + "*", + ] + + sid = "KeyUsage" + + + principals { + + identifiers = [ + + (known after apply), + ] + + type = "AWS" + } + } + } + + # 
module.eks.module.kms.aws_kms_alias.this["cluster"] will be created + + resource "aws_kms_alias" "this" { + + arn = (known after apply) + + id = (known after apply) + + name = "alias/eks/helm-addons-cluster" + + name_prefix = (known after apply) + + target_key_arn = (known after apply) + + target_key_id = (known after apply) + } + + # module.eks.module.kms.aws_kms_key.this[0] will be created + + resource "aws_kms_key" "this" { + + arn = (known after apply) + + bypass_policy_lockout_safety_check = false + + customer_master_key_spec = "SYMMETRIC_DEFAULT" + + description = "helm-addons-cluster cluster encryption key" + + enable_key_rotation = true + + id = (known after apply) + + is_enabled = true + + key_id = (known after apply) + + key_usage = "ENCRYPT_DECRYPT" + + multi_region = false + + policy = (known after apply) + + tags = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + + tags_all = { + + "Environment" = "test" + + "GithubOrg" = "clouddrove" + + "GithubRepo" = "terraform-helm-eks-addons" + + "Name" = "helm-addons" + } + } + + # module.addons.module.prometheus_cloudwatch_exporter[0].module.prometheus_cloudwatch_exporter_secret[0].helm_release.addon[0] will be created + + resource "helm_release" "addon" { + + atomic = true + + chart = "prometheus-cloudwatch-exporter" + + cleanup_on_fail = false + + create_namespace = true + + dependency_update = false + + description = "Prometheus Cloudwatch-Exporter helm Chart deployment configuration" + + disable_crd_hooks = false + + disable_openapi_validation = false + + disable_webhooks = false + + force_update = false + + id = (known after apply) + + lint = false + + manifest = (known after apply) + + max_history = 0 + + metadata = (known after apply) + + name = "prometheus-cloudwatch-exporter" + + namespace = "monitoring" + + pass_credentials = false + + recreate_pods = false + + render_subchart_notes = true + + replace = false + + 
repository = "https://prometheus-community.github.io/helm-charts" + + reset_values = false + + reuse_values = false + + skip_crds = false + + status = "deployed" + + timeout = 600 + + values = [ + + <<-EOT + ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/nodegroup" + operator: In + values: + - "critical" + + ## Using limits and requests + resources: + limits: + cpu: 300m + memory: 250Mi + requests: + cpu: 50m + memory: 150Mi + + # Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here + config: |- + # This is the default configuration for prometheus-cloudwatch-exporter + region: us-east-1 + metrics: + - aws_dimensions: + - InstanceId + aws_metric_name: CPUUtilization + aws_namespace: AWS/EC2 + aws_statistics: + - Average + aws_tag_select: + resource_type_selection: ec2:instance + resource_id_dimension: InstanceId + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsIn + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: NetworkPacketsOut + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskWriteBytes + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: DiskReadBytes + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: CPUCreditBalance + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: 
CPUCreditUsage + aws_namespace: AWS/EC2 + aws_statistics: + - Average + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_Instance + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + - aws_dimensions: + - InstanceId + aws_metric_name: StatusCheckFailed_System + aws_namespace: AWS/EC2 + aws_statistics: + - Sum + EOT, + ] + + verify = false + + version = "0.25.2" + + wait = true + + wait_for_jobs = false + + + postrender {} + + + set { + + name = "aws.secret.name" + + value = "aws" + } + } + +Plan: 78 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + cluster_name = "helm-addons-cluster" + + istio-ingress = "Istio does not support the installation of istio-helmchart in a namespace other than istio-system. We have provided a namespace feature in case Istio-helmchart maintainers fix this issue." + + region = "us-east-1" + + update_kubeconfig = "aws eks update-kubeconfig --name helm-addons-cluster --region us-east-1" + + velero_post_installation = <<-EOT + Once velero server is up and running you need the client before you can use it - + 1. wget https://github.com/vmware-tanzu/velero/releases/download/v1.11.1/velero-v1.11.1-darwin-amd64.tar.gz + 2. tar -xvf velero-v1.11.1-darwin-amd64.tar.gz -C velero-client + EOT +β•· +β”‚ Warning: Argument is deprecated +β”‚  +β”‚  with module.eks.aws_eks_addon.before_compute["vpc-cni"], +β”‚  on .terraform/modules/eks/main.tf line 420, in resource "aws_eks_addon" "before_compute": +β”‚  420: resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") +β”‚  +β”‚ The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. 
Use "resolve_conflicts_on_create" and/or +β”‚ "resolve_conflicts_on_update" instead +β•΅ + +───────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't +guarantee to take exactly these actions if you run "terraform apply" now. diff --git a/_examples/external-eks/main.tf b/_examples/external-eks/main.tf index e07b598..e8c3fb4 100644 --- a/_examples/external-eks/main.tf +++ b/_examples/external-eks/main.tf @@ -73,7 +73,7 @@ module "addons" { redis_helm_config = { values = [file("./config/override-redis.yaml")] } prometheus_helm_config = { values = [file("./config/override-prometheus.yaml")] } prometheus_cloudwatch_exporter_helm_config = { values = [file("./config/prometheus-cloudwatch-exporter/override-prometheus-cloudwatch-exporter-controller.yaml")] } - prometheus_cloudwatch_exporter_secret_manifest = ["./config/prometheus-cloudwatch-exporter/secret.yaml"] + prometheus_cloudwatch_exporter_secret_manifest = file("./config/prometheus-cloudwatch-exporter/secret.yaml") # -- Override Helm Release attributes metrics_server_extra_configs = var.metrics_server_extra_configs diff --git a/addons/prometheus-cloudwatch-exporter/main.tf b/addons/prometheus-cloudwatch-exporter/main.tf index f8fc2e2..d19605f 100644 --- a/addons/prometheus-cloudwatch-exporter/main.tf +++ b/addons/prometheus-cloudwatch-exporter/main.tf @@ -1,5 +1,5 @@ module "prometheus_cloudwatch_exporter_secret" { - count = length(var.secret_manifest) + count = var.secret_manifest != null ? 1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops @@ -12,12 +12,11 @@ module "prometheus_cloudwatch_exporter_secret" { value = "aws" } ] - depends_on = [kubectl_manifest.secret_manifest] } module "prometheus_cloudwatch_exporter_role" { - count = length(var.secret_manifest) == 0 ? 1 : 0 + count = var.secret_manifest == null ? 
1 : 0 source = "../helm" manage_via_gitops = var.manage_via_gitops @@ -35,7 +34,6 @@ module "prometheus_cloudwatch_exporter_role" { } ] - # -- IRSA Configurations irsa_config = { irsa_iam_policies = [aws_iam_policy.policy.arn] @@ -50,8 +48,8 @@ module "prometheus_cloudwatch_exporter_role" { # Secret for AWS Authentication with cloudwatch exporter resource "kubectl_manifest" "secret_manifest" { - count = length(var.secret_manifest) - yaml_body = file(var.secret_manifest[count.index]) + count = var.secret_manifest != null ? 1 : 0 + yaml_body = var.secret_manifest depends_on = [kubernetes_namespace.prometheus_cloudwatch_exporter_namespace] } diff --git a/addons/prometheus-cloudwatch-exporter/variables.tf b/addons/prometheus-cloudwatch-exporter/variables.tf index c0c2246..7382fbe 100644 --- a/addons/prometheus-cloudwatch-exporter/variables.tf +++ b/addons/prometheus-cloudwatch-exporter/variables.tf @@ -33,8 +33,8 @@ variable "prometheus_cloudwatch_exporter_extra_configs" { variable "secret_manifest" { description = "Path of Ingress and Gateway yaml manifests" - type = list(any) - default = [] + type = string + default = null } variable "eks_cluster_name" { diff --git a/variables.tf b/variables.tf index 2911574..a06ba22 100644 --- a/variables.tf +++ b/variables.tf @@ -636,9 +636,9 @@ variable "prometheus_cloudwatch_exporter_extra_configs" { } variable "prometheus_cloudwatch_exporter_secret_manifest" { - description = "Path of Ingress and Gateway yaml manifests" - type = list(any) - default = [] + description = "Path of prometheus cloudwatch exporter manifest" + type = string + default = null } variable "prometheus_cloudwatch_exporter_role_iampolicy_json_content" { From 2d31802c7bd925c61de63db20ee58e32628492f9 Mon Sep 17 00:00:00 2001 From: Anmol Nagpal Date: Mon, 5 Feb 2024 20:09:36 +0530 Subject: [PATCH 13/13] fix: remove plan changes --- _examples/complete/plan.txt | 2054 ----------------------------------- 1 file changed, 2054 deletions(-) delete mode 100644 
_examples/complete/plan.txt diff --git a/_examples/complete/plan.txt b/_examples/complete/plan.txt deleted file mode 100644 index b0944f2..0000000 --- a/_examples/complete/plan.txt +++ /dev/null @@ -1,2054 +0,0 @@ -module.vpc_cni_irsa.data.aws_region.current: Reading... -data.aws_availability_zones.available: Reading... -module.eks.data.aws_caller_identity.current: Reading... -module.vpc_cni_irsa.data.aws_partition.current: Reading... -module.vpc_cni_irsa.data.aws_caller_identity.current: Reading... -module.eks.data.aws_partition.current: Reading... -module.eks.module.kms.data.aws_caller_identity.current: Reading... -module.vpc_cni_irsa.data.aws_partition.current: Read complete after 0s [id=aws] -module.vpc_cni_irsa.data.aws_region.current: Read complete after 0s [id=us-east-1] -module.eks.data.aws_partition.current: Read complete after 0s [id=aws] -module.eks.module.kms.data.aws_partition.current: Reading... -module.eks.module.eks_managed_node_group["critical"].data.aws_caller_identity.current: Reading... -module.eks.module.eks_managed_node_group["application"].data.aws_caller_identity.current: Reading... -module.eks.module.kms.data.aws_partition.current: Read complete after 0s [id=aws] -module.eks.module.eks_managed_node_group["application"].data.aws_partition.current: Reading... -module.eks.module.eks_managed_node_group["critical"].data.aws_partition.current: Reading... -module.eks.module.eks_managed_node_group["application"].data.aws_partition.current: Read complete after 0s [id=aws] -module.eks.module.eks_managed_node_group["critical"].data.aws_partition.current: Read complete after 0s [id=aws] -module.vpc_cni_irsa.data.aws_iam_policy_document.vpc_cni[0]: Reading... -module.eks.data.aws_iam_policy_document.assume_role_policy[0]: Reading... 
-module.vpc_cni_irsa.data.aws_iam_policy_document.vpc_cni[0]: Read complete after 0s [id=572553129] -module.eks.data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1530481229] -module.eks.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] -module.eks.data.aws_iam_session_context.current: Reading... -module.eks.module.kms.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] -module.vpc_cni_irsa.data.aws_caller_identity.current: Read complete after 0s [id=924144197303] -module.eks.module.eks_managed_node_group["application"].data.aws_caller_identity.current: Read complete after 1s [id=924144197303] -module.eks.module.eks_managed_node_group["critical"].data.aws_caller_identity.current: Read complete after 1s [id=924144197303] -data.aws_availability_zones.available: Read complete after 1s [id=us-east-1] -module.eks.data.aws_iam_session_context.current: Read complete after 1s [id=arn:aws:sts::924144197303:assumed-role/AWSReservedSSO_AdministratorAccess_3b5b668e6e5741c8/nilesh.gadgi@clouddrove.com] -module.eks.module.eks_managed_node_group["critical"].data.aws_iam_policy_document.assume_role_policy[0]: Reading... -module.eks.module.eks_managed_node_group["application"].data.aws_iam_policy_document.assume_role_policy[0]: Reading... -module.eks.module.eks_managed_node_group["critical"].data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1734879000] -module.eks.module.eks_managed_node_group["application"].data.aws_iam_policy_document.assume_role_policy[0]: Read complete after 0s [id=1734879000] - -Terraform used the selected providers to generate the following execution -plan. 
Resource actions are indicated with the following symbols: - + create - <= read (data resources) - -Terraform will perform the following actions: - - # data.aws_eks_cluster.eks_cluster will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_eks_cluster" "eks_cluster" { - + access_config = (known after apply) - + arn = (known after apply) - + certificate_authority = (known after apply) - + cluster_id = (known after apply) - + created_at = (known after apply) - + enabled_cluster_log_types = (known after apply) - + endpoint = (known after apply) - + id = (known after apply) - + identity = (known after apply) - + kubernetes_network_config = (known after apply) - + name = "helm-addons-cluster" - + outpost_config = (known after apply) - + platform_version = (known after apply) - + role_arn = (known after apply) - + status = (known after apply) - + tags = (known after apply) - + version = (known after apply) - + vpc_config = (known after apply) - } - - # data.aws_eks_cluster_auth.eks_cluster will be read during apply - # (config refers to values not yet known) - <= data "aws_eks_cluster_auth" "eks_cluster" { - + id = (known after apply) - + name = (known after apply) - + token = (sensitive value) - } - - # aws_iam_policy.node_additional will be created - + resource "aws_iam_policy" "node_additional" { - + arn = (known after apply) - + description = "Example usage of node additional policy" - + id = (known after apply) - + name = "helm-addons-additional" - + name_prefix = (known after apply) - + path = "/" - + policy = jsonencode( - { - + Statement = [ - + { - + Action = [ - + "ec2:Describe*", - ] - + Effect = "Allow" - + Resource = "*" - }, - ] - + Version = "2012-10-17" - } - ) - + policy_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = 
"clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.addons.data.aws_caller_identity.current will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_caller_identity" "current" { - + account_id = (known after apply) - + arn = (known after apply) - + id = (known after apply) - + user_id = (known after apply) - } - - # module.addons.data.aws_eks_cluster.eks_cluster will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_eks_cluster" "eks_cluster" { - + access_config = (known after apply) - + arn = (known after apply) - + certificate_authority = (known after apply) - + cluster_id = (known after apply) - + created_at = (known after apply) - + enabled_cluster_log_types = (known after apply) - + endpoint = (known after apply) - + id = (known after apply) - + identity = (known after apply) - + kubernetes_network_config = (known after apply) - + name = "helm-addons-cluster" - + outpost_config = (known after apply) - + platform_version = (known after apply) - + role_arn = (known after apply) - + status = (known after apply) - + tags = (known after apply) - + version = (known after apply) - + vpc_config = (known after apply) - } - - # module.addons.data.aws_partition.current will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_partition" "current" { - + dns_suffix = (known after apply) - + id = (known after apply) - + partition = (known after apply) - + reverse_dns_prefix = (known after apply) - } - - # module.addons.data.aws_region.current will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_region" "current" { - + description = (known after apply) - + endpoint = (known after apply) - + id = (known after apply) - + name = (known after apply) - } - - # module.addons.time_sleep.dataplane will be created - + resource "time_sleep" 
"dataplane" { - + create_duration = "10s" - + id = (known after apply) - + triggers = { - + "data_plane_wait_arn" = "" - + "eks_cluster_id" = (known after apply) - } - } - - # module.eks.data.aws_eks_addon_version.this["vpc-cni"] will be read during apply - # (depends on a resource or a module with changes pending) - <= data "aws_eks_addon_version" "this" { - + addon_name = "vpc-cni" - + id = (known after apply) - + kubernetes_version = "1.29" - + most_recent = true - + version = (known after apply) - } - - # module.eks.data.tls_certificate.this[0] will be read during apply - # (config refers to values not yet known) - <= data "tls_certificate" "this" { - + certificates = (known after apply) - + id = (known after apply) - + url = (known after apply) - } - - # module.eks.aws_cloudwatch_log_group.this[0] will be created - + resource "aws_cloudwatch_log_group" "this" { - + arn = (known after apply) - + id = (known after apply) - + log_group_class = (known after apply) - + name = "/aws/eks/helm-addons-cluster/cluster" - + name_prefix = (known after apply) - + retention_in_days = 90 - + skip_destroy = false - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "/aws/eks/helm-addons-cluster/cluster" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "/aws/eks/helm-addons-cluster/cluster" - } - } - - # module.eks.aws_ec2_tag.cluster_primary_security_group["Environment"] will be created - + resource "aws_ec2_tag" "cluster_primary_security_group" { - + id = (known after apply) - + key = "Environment" - + resource_id = (known after apply) - + value = "test" - } - - # module.eks.aws_ec2_tag.cluster_primary_security_group["GithubOrg"] will be created - + resource "aws_ec2_tag" "cluster_primary_security_group" { - + id = (known after apply) - + key = "GithubOrg" - + resource_id = (known after apply) - + value = 
"clouddrove" - } - - # module.eks.aws_ec2_tag.cluster_primary_security_group["GithubRepo"] will be created - + resource "aws_ec2_tag" "cluster_primary_security_group" { - + id = (known after apply) - + key = "GithubRepo" - + resource_id = (known after apply) - + value = "terraform-helm-eks-addons" - } - - # module.eks.aws_eks_addon.before_compute["vpc-cni"] will be created - + resource "aws_eks_addon" "before_compute" { - + addon_name = "vpc-cni" - + addon_version = (known after apply) - + arn = (known after apply) - + cluster_name = "helm-addons-cluster" - + configuration_values = jsonencode( - { - + env = { - + ENABLE_PREFIX_DELEGATION = "true" - + WARM_PREFIX_TARGET = "1" - } - } - ) - + created_at = (known after apply) - + id = (known after apply) - + modified_at = (known after apply) - + resolve_conflicts = "OVERWRITE" - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - - + timeouts {} - } - - # module.eks.aws_eks_cluster.this[0] will be created - + resource "aws_eks_cluster" "this" { - + arn = (known after apply) - + certificate_authority = (known after apply) - + cluster_id = (known after apply) - + created_at = (known after apply) - + enabled_cluster_log_types = [ - + "api", - + "audit", - + "authenticator", - ] - + endpoint = (known after apply) - + id = (known after apply) - + identity = (known after apply) - + name = "helm-addons-cluster" - + platform_version = (known after apply) - + role_arn = (known after apply) - + status = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = 
"terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + version = "1.29" - - + encryption_config { - + resources = [ - + "secrets", - ] - - + provider { - + key_arn = (known after apply) - } - } - - + kubernetes_network_config { - + ip_family = "ipv4" - + service_ipv4_cidr = (known after apply) - + service_ipv6_cidr = (known after apply) - } - - + timeouts {} - - + vpc_config { - + cluster_security_group_id = (known after apply) - + endpoint_private_access = true - + endpoint_public_access = true - + public_access_cidrs = [ - + "0.0.0.0/0", - ] - + security_group_ids = (known after apply) - + subnet_ids = (known after apply) - + vpc_id = (known after apply) - } - } - - # module.eks.aws_iam_openid_connect_provider.oidc_provider[0] will be created - + resource "aws_iam_openid_connect_provider" "oidc_provider" { - + arn = (known after apply) - + client_id_list = [ - + "sts.amazonaws.com", - ] - + id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + thumbprint_list = (known after apply) - + url = (known after apply) - } - - # module.eks.aws_iam_policy.cluster_encryption[0] will be created - + resource "aws_iam_policy" "cluster_encryption" { - + arn = (known after apply) - + description = "Cluster encryption policy to allow cluster role to utilize CMK provided" - + id = (known after apply) - + name = (known after apply) - + name_prefix = "helm-addons-cluster-cluster-ClusterEncryption" - + path = "/" - + policy = (known after apply) - + policy_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + 
"GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.eks.aws_iam_role.this[0] will be created - + resource "aws_iam_role" "this" { - + arn = (known after apply) - + assume_role_policy = jsonencode( - { - + Statement = [ - + { - + Action = "sts:AssumeRole" - + Effect = "Allow" - + Principal = { - + Service = "eks.amazonaws.com" - } - + Sid = "EKSClusterAssumeRole" - }, - ] - + Version = "2012-10-17" - } - ) - + create_date = (known after apply) - + force_detach_policies = true - + id = (known after apply) - + managed_policy_arns = (known after apply) - + max_session_duration = 3600 - + name = (known after apply) - + name_prefix = "helm-addons-cluster-cluster-" - + path = "/" - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + unique_id = (known after apply) - - + inline_policy { - + name = "helm-addons-cluster-cluster" - + policy = jsonencode( - { - + Statement = [ - + { - + Action = [ - + "logs:CreateLogGroup", - ] - + Effect = "Deny" - + Resource = "*" - }, - ] - + Version = "2012-10-17" - } - ) - } - } - - # module.eks.aws_iam_role_policy_attachment.cluster_encryption[0] will be created - + resource "aws_iam_role_policy_attachment" "cluster_encryption" { - + id = (known after apply) - + policy_arn = (known after apply) - + role = (known after apply) - } - - # module.eks.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" - + role = (known after apply) - } - - # module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"] will be created - + resource "aws_iam_role_policy_attachment" 
"this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController" - + role = (known after apply) - } - - # module.eks.aws_security_group.cluster[0] will be created - + resource "aws_security_group" "cluster" { - + arn = (known after apply) - + description = "EKS cluster security group" - + egress = (known after apply) - + id = (known after apply) - + ingress = (known after apply) - + name = (known after apply) - + name_prefix = "helm-addons-cluster-cluster-" - + owner_id = (known after apply) - + revoke_rules_on_delete = false - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons-cluster-cluster" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons-cluster-cluster" - } - + vpc_id = (known after apply) - } - - # module.eks.aws_security_group.node[0] will be created - + resource "aws_security_group" "node" { - + arn = (known after apply) - + description = "EKS node shared security group" - + egress = (known after apply) - + id = (known after apply) - + ingress = (known after apply) - + name = (known after apply) - + name_prefix = "helm-addons-cluster-node-" - + owner_id = (known after apply) - + revoke_rules_on_delete = false - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons-cluster-node" - + "kubernetes.io/cluster/helm-addons-cluster" = "owned" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons-cluster-node" - + "kubernetes.io/cluster/helm-addons-cluster" = "owned" - } - + vpc_id = (known after apply) - } - - # module.eks.aws_security_group_rule.cluster["ingress_nodes_443"] will be created - + resource "aws_security_group_rule" "cluster" { - + 
description = "Node groups to cluster API" - + from_port = 443 - + id = (known after apply) - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["egress_all"] will be created - + resource "aws_security_group_rule" "node" { - + cidr_blocks = [ - + "0.0.0.0/0", - ] - + description = "Allow all egress" - + from_port = 0 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "-1" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 0 - + type = "egress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_443"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node groups" - + from_port = 443 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_4443_webhook"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node 4443/tcp webhook" - + from_port = 4443 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 4443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_6443_webhook"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node 6443/tcp webhook" - + 
from_port = 6443 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 6443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_8443_webhook"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node 8443/tcp webhook" - + from_port = 8443 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 8443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_9443_webhook"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node 9443/tcp webhook" - + from_port = 9443 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 9443 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_cluster_kubelet"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Cluster API to node kubelets" - + from_port = 10250 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = false - + source_security_group_id = (known after apply) - + to_port = 10250 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_nodes_ephemeral"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Node to node ingress on ephemeral ports" - + from_port = 
1025 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = true - + source_security_group_id = (known after apply) - + to_port = 65535 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_self_coredns_tcp"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Node to node CoreDNS" - + from_port = 53 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "tcp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = true - + source_security_group_id = (known after apply) - + to_port = 53 - + type = "ingress" - } - - # module.eks.aws_security_group_rule.node["ingress_self_coredns_udp"] will be created - + resource "aws_security_group_rule" "node" { - + description = "Node to node CoreDNS UDP" - + from_port = 53 - + id = (known after apply) - + prefix_list_ids = [] - + protocol = "udp" - + security_group_id = (known after apply) - + security_group_rule_id = (known after apply) - + self = true - + source_security_group_id = (known after apply) - + to_port = 53 - + type = "ingress" - } - - # module.eks.time_sleep.this[0] will be created - + resource "time_sleep" "this" { - + create_duration = "30s" - + id = (known after apply) - + triggers = { - + "cluster_certificate_authority_data" = (known after apply) - + "cluster_endpoint" = (known after apply) - + "cluster_name" = "helm-addons-cluster" - + "cluster_version" = "1.29" - } - } - - # module.vpc.aws_default_network_acl.this[0] will be created - + resource "aws_default_network_acl" "this" { - + arn = (known after apply) - + default_network_acl_id = (known after apply) - + id = (known after apply) - + owner_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + 
tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - - + egress { - + action = "allow" - + from_port = 0 - + ipv6_cidr_block = "::/0" - + protocol = "-1" - + rule_no = 101 - + to_port = 0 - } - + egress { - + action = "allow" - + cidr_block = "0.0.0.0/0" - + from_port = 0 - + protocol = "-1" - + rule_no = 100 - + to_port = 0 - } - - + ingress { - + action = "allow" - + from_port = 0 - + ipv6_cidr_block = "::/0" - + protocol = "-1" - + rule_no = 101 - + to_port = 0 - } - + ingress { - + action = "allow" - + cidr_block = "0.0.0.0/0" - + from_port = 0 - + protocol = "-1" - + rule_no = 100 - + to_port = 0 - } - } - - # module.vpc.aws_default_route_table.default[0] will be created - + resource "aws_default_route_table" "default" { - + arn = (known after apply) - + default_route_table_id = (known after apply) - + id = (known after apply) - + owner_id = (known after apply) - + route = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - - + timeouts { - + create = "5m" - + update = "5m" - } - } - - # module.vpc.aws_default_security_group.this[0] will be created - + resource "aws_default_security_group" "this" { - + arn = (known after apply) - + description = (known after apply) - + egress = (known after apply) - + id = (known after apply) - + ingress = (known after apply) - + name = (known after apply) - + name_prefix = (known after apply) - + owner_id = (known after apply) - + revoke_rules_on_delete = false - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = 
"helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_eip.nat[0] will be created - + resource "aws_eip" "nat" { - + allocation_id = (known after apply) - + association_id = (known after apply) - + carrier_ip = (known after apply) - + customer_owned_ip = (known after apply) - + domain = "vpc" - + id = (known after apply) - + instance = (known after apply) - + network_border_group = (known after apply) - + network_interface = (known after apply) - + private_dns = (known after apply) - + private_ip = (known after apply) - + public_dns = (known after apply) - + public_ip = (known after apply) - + public_ipv4_pool = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc = (known after apply) - } - - # module.vpc.aws_internet_gateway.this[0] will be created - + resource "aws_internet_gateway" "this" { - + arn = (known after apply) - + id = (known after apply) - + owner_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_nat_gateway.this[0] will be created - + resource "aws_nat_gateway" "this" { - + allocation_id = (known after apply) - + association_id = (known after apply) - + connectivity_type = "public" - + id = (known after apply) - + network_interface_id = (known after apply) - + private_ip = 
(known after apply) - + public_ip = (known after apply) - + secondary_private_ip_address_count = (known after apply) - + secondary_private_ip_addresses = (known after apply) - + subnet_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.vpc.aws_route.private_nat_gateway[0] will be created - + resource "aws_route" "private_nat_gateway" { - + destination_cidr_block = "0.0.0.0/0" - + id = (known after apply) - + instance_id = (known after apply) - + instance_owner_id = (known after apply) - + nat_gateway_id = (known after apply) - + network_interface_id = (known after apply) - + origin = (known after apply) - + route_table_id = (known after apply) - + state = (known after apply) - - + timeouts { - + create = "5m" - } - } - - # module.vpc.aws_route.public_internet_gateway[0] will be created - + resource "aws_route" "public_internet_gateway" { - + destination_cidr_block = "0.0.0.0/0" - + gateway_id = (known after apply) - + id = (known after apply) - + instance_id = (known after apply) - + instance_owner_id = (known after apply) - + network_interface_id = (known after apply) - + origin = (known after apply) - + route_table_id = (known after apply) - + state = (known after apply) - - + timeouts { - + create = "5m" - } - } - - # module.vpc.aws_route_table.private[0] will be created - + resource "aws_route_table" "private" { - + arn = (known after apply) - + id = (known after apply) - + owner_id = (known after apply) - + propagating_vgws = (known after apply) - + route = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + 
"GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_route_table.public[0] will be created - + resource "aws_route_table" "public" { - + arn = (known after apply) - + id = (known after apply) - + owner_id = (known after apply) - + propagating_vgws = (known after apply) - + route = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_route_table_association.database[0] will be created - + resource "aws_route_table_association" "database" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.database[1] will be created - + resource "aws_route_table_association" "database" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.database[2] will be created - + resource "aws_route_table_association" "database" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.private[0] will be created - + resource "aws_route_table_association" "private" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.private[1] will be created - + resource "aws_route_table_association" "private" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.private[2] 
will be created - + resource "aws_route_table_association" "private" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.public[0] will be created - + resource "aws_route_table_association" "public" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.public[1] will be created - + resource "aws_route_table_association" "public" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_route_table_association.public[2] will be created - + resource "aws_route_table_association" "public" { - + id = (known after apply) - + route_table_id = (known after apply) - + subnet_id = (known after apply) - } - - # module.vpc.aws_subnet.database[0] will be created - + resource "aws_subnet" "database" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1a" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.8.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.database[1] will be created - + resource "aws_subnet" "database" { - + arn = 
(known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1b" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.9.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.database[2] will be created - + resource "aws_subnet" "database" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1c" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.10.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.private[0] will be created - + resource "aws_subnet" "private" 
{ - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1a" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.0.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.private[1] will be created - + resource "aws_subnet" "private" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1b" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.1.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = 
"helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.private[2] will be created - + resource "aws_subnet" "private" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1c" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.2.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/internal-elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.public[0] will be created - + resource "aws_subnet" "public" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1a" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.4.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" 
- + "kubernetes.io/role/elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.public[1] will be created - + resource "aws_subnet" "public" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1b" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.5.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_subnet.public[2] will be created - + resource "aws_subnet" "public" { - + arn = (known after apply) - + assign_ipv6_address_on_creation = false - + availability_zone = "us-east-1c" - + availability_zone_id = (known after apply) - + cidr_block = "10.0.6.0/24" - + enable_dns64 = false - + enable_resource_name_dns_a_record_on_launch = false - + enable_resource_name_dns_aaaa_record_on_launch = false - + id = (known after apply) - + ipv6_cidr_block_association_id = (known after apply) - + ipv6_native = false - + map_public_ip_on_launch = false - + owner_id = (known after apply) - + private_dns_hostname_type_on_launch = (known after apply) 
- + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/elb" = "1" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "kubernetes.io/role/elb" = "1" - } - + vpc_id = (known after apply) - } - - # module.vpc.aws_vpc.this[0] will be created - + resource "aws_vpc" "this" { - + arn = (known after apply) - + cidr_block = "10.0.0.0/16" - + default_network_acl_id = (known after apply) - + default_route_table_id = (known after apply) - + default_security_group_id = (known after apply) - + dhcp_options_id = (known after apply) - + enable_dns_hostnames = true - + enable_dns_support = true - + enable_network_address_usage_metrics = (known after apply) - + id = (known after apply) - + instance_tenancy = "default" - + ipv6_association_id = (known after apply) - + ipv6_cidr_block = (known after apply) - + ipv6_cidr_block_network_border_group = (known after apply) - + main_route_table_id = (known after apply) - + owner_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.vpc_cni_irsa.data.aws_iam_policy_document.this[0] will be read during apply - # (config refers to values not yet known) - <= data "aws_iam_policy_document" "this" { - + id = (known after apply) - + json = (known after apply) - - + statement { - + actions = [ - + "sts:AssumeRoleWithWebIdentity", - ] - + effect = "Allow" - - + condition { - + test = "StringEquals" - + values = [ - + "sts.amazonaws.com", - ] - + variable = (known after apply) - } - + condition { - + test = "StringEquals" - + values = [ - + 
"system:serviceaccount:kube-system:aws-node", - ] - + variable = (known after apply) - } - - + principals { - + identifiers = [ - + (known after apply), - ] - + type = "Federated" - } - } - } - - # module.vpc_cni_irsa.aws_iam_policy.vpc_cni[0] will be created - + resource "aws_iam_policy" "vpc_cni" { - + arn = (known after apply) - + description = "Provides the Amazon VPC CNI Plugin (amazon-vpc-cni-k8s) the permissions it requires to modify the IPv4/IPv6 address configuration on your EKS worker nodes" - + id = (known after apply) - + name = (known after apply) - + name_prefix = "AmazonEKS_CNI_Policy-" - + path = "/" - + policy = jsonencode( - { - + Statement = [ - + { - + Action = [ - + "ec2:DescribeTags", - + "ec2:DescribeNetworkInterfaces", - + "ec2:DescribeInstances", - + "ec2:DescribeInstanceTypes", - + "ec2:AssignIpv6Addresses", - ] - + Effect = "Allow" - + Resource = "*" - + Sid = "IPV6" - }, - + { - + Action = "ec2:CreateTags" - + Effect = "Allow" - + Resource = "arn:aws:ec2:*:*:network-interface/*" - + Sid = "CreateTags" - }, - ] - + Version = "2012-10-17" - } - ) - + policy_id = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.vpc_cni_irsa.aws_iam_role.this[0] will be created - + resource "aws_iam_role" "this" { - + arn = (known after apply) - + assume_role_policy = (known after apply) - + create_date = (known after apply) - + force_detach_policies = true - + id = (known after apply) - + managed_policy_arns = (known after apply) - + max_session_duration = 3600 - + name = (known after apply) - + name_prefix = "VPC-CNI-IRSA" - + path = "/" - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = 
"helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + unique_id = (known after apply) - } - - # module.vpc_cni_irsa.aws_iam_role_policy_attachment.vpc_cni[0] will be created - + resource "aws_iam_role_policy_attachment" "vpc_cni" { - + id = (known after apply) - + policy_arn = (known after apply) - + role = (known after apply) - } - - # module.addons.module.prometheus_cloudwatch_exporter[0].aws_iam_policy.policy will be created - + resource "aws_iam_policy" "policy" { - + arn = (known after apply) - + description = "IAM Policy used by prometheus-cloudwatch-exporter-helm-addons-cluster IAM Role" - + id = (known after apply) - + name = "prometheus-cloudwatch-exporter-helm-addons-cluster" - + name_prefix = (known after apply) - + path = "/" - + policy = jsonencode( - { - + Statement = [ - + { - + Action = [ - + "cloudwatch:ListMetrics", - + "cloudwatch:GetMetricStatistics", - + "cloudwatch:GetMetricData", - ] - + Effect = "Allow" - + Resource = "*" - + Sid = "AllowCloudwatch" - }, - + { - + Action = [ - + "tag:GetResources", - ] - + Effect = "Allow" - + Resource = "*" - + Sid = "AllowResourceTagging" - }, - ] - + Version = "2012-10-17" - } - ) - + policy_id = (known after apply) - + tags_all = (known after apply) - } - - # module.addons.module.prometheus_cloudwatch_exporter[0].kubectl_manifest.secret_manifest[0] will be created - + resource "kubectl_manifest" "secret_manifest" { - + api_version = "v1" - + apply_only = false - + force_conflicts = false - + force_new = false - + id = (known after apply) - + kind = "Secret" - + live_manifest_incluster = (sensitive value) - + live_uid = (known after apply) - + name = "nilesh" - + namespace = "monitoring" - + server_side_apply = false - + uid = (known after apply) - + validate_schema = true - + wait_for_rollout = true - + yaml_body = (sensitive value) - + yaml_body_parsed = <<-EOT - apiVersion: v1 - 
data: (sensitive value) - kind: Secret - metadata: - name: nilesh - namespace: monitoring - type: Opaque - EOT - + yaml_incluster = (sensitive value) - } - - # module.addons.module.prometheus_cloudwatch_exporter[0].kubernetes_namespace.prometheus_cloudwatch_exporter_namespace will be created - + resource "kubernetes_namespace" "prometheus_cloudwatch_exporter_namespace" { - + id = (known after apply) - + wait_for_default_service_account = false - - + metadata { - + generation = (known after apply) - + name = "monitoring" - + resource_version = (known after apply) - + uid = (known after apply) - } - } - - # module.eks.module.eks_managed_node_group["application"].aws_eks_node_group.this[0] will be created - + resource "aws_eks_node_group" "this" { - + ami_type = "AL2_x86_64" - + arn = (known after apply) - + capacity_type = "SPOT" - + cluster_name = "helm-addons-cluster" - + disk_size = 20 - + id = (known after apply) - + instance_types = [ - + "t3.medium", - ] - + node_group_name = "application" - + node_group_name_prefix = (known after apply) - + node_role_arn = (known after apply) - + release_version = (known after apply) - + resources = (known after apply) - + status = (known after apply) - + subnet_ids = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "application" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "application" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + version = "1.29" - - + scaling_config { - + desired_size = 0 - + max_size = 1 - + min_size = 0 - } - - + timeouts {} - - + update_config { - + max_unavailable_percentage = 33 - } - } 
- - # module.eks.module.eks_managed_node_group["application"].aws_iam_role.this[0] will be created - + resource "aws_iam_role" "this" { - + arn = (known after apply) - + assume_role_policy = jsonencode( - { - + Statement = [ - + { - + Action = "sts:AssumeRole" - + Effect = "Allow" - + Principal = { - + Service = "ec2.amazonaws.com" - } - + Sid = "EKSNodeAssumeRole" - }, - ] - + Version = "2012-10-17" - } - ) - + create_date = (known after apply) - + description = "EKS managed node group IAM role" - + force_detach_policies = true - + id = (known after apply) - + managed_policy_arns = (known after apply) - + max_session_duration = 3600 - + name = (known after apply) - + name_prefix = "application-eks-node-group-" - + path = "/" - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + unique_id = (known after apply) - } - - # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.additional["policy_arn"] will be created - + resource "aws_iam_role_policy_attachment" "additional" { - + id = (known after apply) - + policy_arn = (known after apply) - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - + role = (known after 
apply) - } - - # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["application"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["critical"].aws_eks_node_group.this[0] will be created - + resource "aws_eks_node_group" "this" { - + ami_type = "AL2_x86_64" - + arn = (known after apply) - + capacity_type = "ON_DEMAND" - + cluster_name = "helm-addons-cluster" - + disk_size = 20 - + id = (known after apply) - + instance_types = [ - + "t3.medium", - ] - + node_group_name = "critical" - + node_group_name_prefix = (known after apply) - + node_role_arn = (known after apply) - + release_version = (known after apply) - + resources = (known after apply) - + status = (known after apply) - + subnet_ids = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "critical" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "critical" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + version = "1.29" - - + scaling_config { - + desired_size = 1 - + max_size = 2 - + 
min_size = 1 - } - - + timeouts {} - - + update_config { - + max_unavailable_percentage = 33 - } - } - - # module.eks.module.eks_managed_node_group["critical"].aws_iam_role.this[0] will be created - + resource "aws_iam_role" "this" { - + arn = (known after apply) - + assume_role_policy = jsonencode( - { - + Statement = [ - + { - + Action = "sts:AssumeRole" - + Effect = "Allow" - + Principal = { - + Service = "ec2.amazonaws.com" - } - + Sid = "EKSNodeAssumeRole" - }, - ] - + Version = "2012-10-17" - } - ) - + create_date = (known after apply) - + description = "EKS managed node group IAM role" - + force_detach_policies = true - + id = (known after apply) - + managed_policy_arns = (known after apply) - + max_session_duration = 3600 - + name = (known after apply) - + name_prefix = "critical-eks-node-group-" - + path = "/" - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - + "karpenter.sh/discovery/helm-addons-cluster" = "helm-addons-cluster" - + "kubernetes.io/cluster/helm-addons-cluster" = "shared" - } - + unique_id = (known after apply) - } - - # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.additional["policy_arn"] will be created - + resource "aws_iam_role_policy_attachment" "additional" { - + id = (known after apply) - + policy_arn = (known after apply) - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + 
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" - + role = (known after apply) - } - - # module.eks.module.eks_managed_node_group["critical"].aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] will be created - + resource "aws_iam_role_policy_attachment" "this" { - + id = (known after apply) - + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" - + role = (known after apply) - } - - # module.eks.module.kms.data.aws_iam_policy_document.this[0] will be read during apply - # (config refers to values not yet known) - <= data "aws_iam_policy_document" "this" { - + id = (known after apply) - + json = (known after apply) - + override_policy_documents = [] - + source_policy_documents = [] - - + statement { - + actions = [ - + "kms:CancelKeyDeletion", - + "kms:Create*", - + "kms:Delete*", - + "kms:Describe*", - + "kms:Disable*", - + "kms:Enable*", - + "kms:Get*", - + "kms:List*", - + "kms:Put*", - + "kms:Revoke*", - + "kms:ScheduleKeyDeletion", - + "kms:TagResource", - + "kms:UntagResource", - + "kms:Update*", - ] - + resources = [ - + "*", - ] - + sid = "KeyAdministration" - - + principals { - + identifiers = [ - + "arn:aws:iam::924144197303:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_AdministratorAccess_3b5b668e6e5741c8", - ] - + type = "AWS" - } - } - + statement { - + actions = [ - + "kms:Decrypt", - + "kms:DescribeKey", - + "kms:Encrypt", - + "kms:GenerateDataKey*", - + "kms:ReEncrypt*", - ] - + resources = [ - + "*", - ] - + sid = "KeyUsage" - - + principals { - + identifiers = [ - + (known after apply), - ] - + type = "AWS" - } - } - } - - # 
module.eks.module.kms.aws_kms_alias.this["cluster"] will be created - + resource "aws_kms_alias" "this" { - + arn = (known after apply) - + id = (known after apply) - + name = "alias/eks/helm-addons-cluster" - + name_prefix = (known after apply) - + target_key_arn = (known after apply) - + target_key_id = (known after apply) - } - - # module.eks.module.kms.aws_kms_key.this[0] will be created - + resource "aws_kms_key" "this" { - + arn = (known after apply) - + bypass_policy_lockout_safety_check = false - + customer_master_key_spec = "SYMMETRIC_DEFAULT" - + description = "helm-addons-cluster cluster encryption key" - + enable_key_rotation = true - + id = (known after apply) - + is_enabled = true - + key_id = (known after apply) - + key_usage = "ENCRYPT_DECRYPT" - + multi_region = false - + policy = (known after apply) - + tags = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - + tags_all = { - + "Environment" = "test" - + "GithubOrg" = "clouddrove" - + "GithubRepo" = "terraform-helm-eks-addons" - + "Name" = "helm-addons" - } - } - - # module.addons.module.prometheus_cloudwatch_exporter[0].module.prometheus_cloudwatch_exporter_secret[0].helm_release.addon[0] will be created - + resource "helm_release" "addon" { - + atomic = true - + chart = "prometheus-cloudwatch-exporter" - + cleanup_on_fail = false - + create_namespace = true - + dependency_update = false - + description = "Prometheus Cloudwatch-Exporter helm Chart deployment configuration" - + disable_crd_hooks = false - + disable_openapi_validation = false - + disable_webhooks = false - + force_update = false - + id = (known after apply) - + lint = false - + manifest = (known after apply) - + max_history = 0 - + metadata = (known after apply) - + name = "prometheus-cloudwatch-exporter" - + namespace = "monitoring" - + pass_credentials = false - + recreate_pods = false - + render_subchart_notes = true - + replace = false - + 
repository = "https://prometheus-community.github.io/helm-charts" - + reset_values = false - + reuse_values = false - + skip_crds = false - + status = "deployed" - + timeout = 600 - + values = [ - + <<-EOT - ## Node affinity for particular node in which labels key is "Infra-Services" and value is "true" - - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "eks.amazonaws.com/nodegroup" - operator: In - values: - - "critical" - - ## Using limits and requests - resources: - limits: - cpu: 300m - memory: 250Mi - requests: - cpu: 50m - memory: 150Mi - - # Configuration is rendered with `tpl` function, therefore you can use any Helm variables and/or templates here - config: |- - # This is the default configuration for prometheus-cloudwatch-exporter - region: us-east-1 - metrics: - - aws_dimensions: - - InstanceId - aws_metric_name: CPUUtilization - aws_namespace: AWS/EC2 - aws_statistics: - - Average - aws_tag_select: - resource_type_selection: ec2:instance - resource_id_dimension: InstanceId - - aws_dimensions: - - InstanceId - aws_metric_name: NetworkIn - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: NetworkOut - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: NetworkPacketsIn - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: NetworkPacketsOut - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: DiskWriteBytes - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: DiskReadBytes - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: CPUCreditBalance - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: 
CPUCreditUsage - aws_namespace: AWS/EC2 - aws_statistics: - - Average - - aws_dimensions: - - InstanceId - aws_metric_name: StatusCheckFailed - aws_namespace: AWS/EC2 - aws_statistics: - - Sum - - aws_dimensions: - - InstanceId - aws_metric_name: StatusCheckFailed_Instance - aws_namespace: AWS/EC2 - aws_statistics: - - Sum - - aws_dimensions: - - InstanceId - aws_metric_name: StatusCheckFailed_System - aws_namespace: AWS/EC2 - aws_statistics: - - Sum - EOT, - ] - + verify = false - + version = "0.25.2" - + wait = true - + wait_for_jobs = false - - + postrender {} - - + set { - + name = "aws.secret.name" - + value = "aws" - } - } - -Plan: 78 to add, 0 to change, 0 to destroy. - -Changes to Outputs: - + cluster_name = "helm-addons-cluster" - + istio-ingress = "Istio does not support the installation of istio-helmchart in a namespace other than istio-system. We have provided a namespace feature in case Istio-helmchart maintainers fix this issue." - + region = "us-east-1" - + update_kubeconfig = "aws eks update-kubeconfig --name helm-addons-cluster --region us-east-1" - + velero_post_installation = <<-EOT - Once velero server is up and running you need the client before you can use it - - 1. wget https://github.com/vmware-tanzu/velero/releases/download/v1.11.1/velero-v1.11.1-darwin-amd64.tar.gz - 2. tar -xvf velero-v1.11.1-darwin-amd64.tar.gz -C velero-client - EOT -β•· -β”‚ Warning: Argument is deprecated -β”‚  -β”‚  with module.eks.aws_eks_addon.before_compute["vpc-cni"], -β”‚  on .terraform/modules/eks/main.tf line 420, in resource "aws_eks_addon" "before_compute": -β”‚  420: resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") -β”‚  -β”‚ The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. 
Use "resolve_conflicts_on_create" and/or -β”‚ "resolve_conflicts_on_update" instead -β•΅ - -───────────────────────────────────────────────────────────────────────────── - -Note: You didn't use the -out option to save this plan, so Terraform can't -guarantee to take exactly these actions if you run "terraform apply" now.