Merge pull request #8820 from ministryofjustice/standard-lifecycle-v1
Adjust AWS S3 lifecycle management: add Intelligent-Tiering support and a standard lifecycle category
harichintala1 authored Dec 3, 2024
2 parents 8d2f502 + 2c5836d commit 883814c
Showing 6 changed files with 133 additions and 84 deletions.
@@ -173,7 +173,8 @@
"enable_dbt_k8s_secrets": true,
"dpr_generic_athena_workgroup": true,
"analytics_generic_athena_workgroup": true,
"redshift_table_expiry_seconds": "604800"
"redshift_table_expiry_seconds": "604800",
"enable_s3_data_migrate_lambda": true
},
"test": {
"project_short_id": "dpr",
@@ -348,7 +349,8 @@
"enable_dbt_k8s_secrets": true,
"dpr_generic_athena_workgroup": true,
"analytics_generic_athena_workgroup": true,
"redshift_table_expiry_seconds": "604800"
"redshift_table_expiry_seconds": "604800",
"enable_s3_data_migrate_lambda": true
},
"preproduction": {
"project_short_id": "dpr",
@@ -543,7 +545,8 @@
]
}
],
"redshift_table_expiry_seconds": "604800"
"redshift_table_expiry_seconds": "604800",
"enable_s3_data_migrate_lambda": true
},
"production": {
"project_short_id": "dpr",
@@ -733,7 +736,8 @@
]
}
],
"redshift_table_expiry_seconds": "86400"
"redshift_table_expiry_seconds": "86400",
"enable_s3_data_migrate_lambda": false
}
}
}
@@ -0,0 +1,38 @@
# S3 Data Lifecycle Migration Lambda function
module "aws_s3_data_migrate" {
source = "./modules/lambdas/generic"

enable_lambda = local.enable_s3_data_migrate_lambda
name = local.lambda_s3_data_migrate_name
s3_bucket = local.lambda_s3_data_migrate_code_s3_bucket
s3_key = local.lambda_s3_data_migrate_code_s3_key
handler = local.lambda_s3_data_migrate_handler
runtime = local.lambda_s3_data_migrate_runtime
policies = local.lambda_s3_data_migrate_policies
tracing = local.lambda_s3_data_migrate_tracing

log_retention_in_days = local.lambda_log_retention_in_days

# Set timeout to the maximum of 900 seconds (15 minutes)
timeout = 900

# Optional: Adjust memory size if needed
memory_size = 2048

vpc_settings = {
subnet_ids = [data.aws_subnet.data_subnets_a.id, data.aws_subnet.data_subnets_b.id, data.aws_subnet.data_subnets_c.id]
security_group_ids = [aws_security_group.lambda_generic[0].id, ]
}

tags = merge(
local.all_tags,
{
Resource_Group = "dpr-operations"
Jira = "DPR2-1368"
Resource_Type = "lambda"
Name = local.lambda_s3_data_migrate_name
}
)

depends_on = [aws_iam_policy.s3_read_access_policy, aws_iam_policy.s3_read_write_policy, aws_iam_policy.kms_read_access_policy]
}
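The generic Lambda module's source is not part of this diff, so how `enable_lambda` is consumed is not visible here. A minimal sketch of the count-gating pattern such modules typically use, written under that assumption:

# Hypothetical sketch of ./modules/lambdas/generic: creation is assumed to be
# gated on var.enable_lambda via count (the module source is not in this diff).
resource "aws_lambda_function" "this" {
  count = var.enable_lambda ? 1 : 0

  function_name = var.name
  s3_bucket     = var.s3_bucket
  s3_key        = var.s3_key
  handler       = var.handler
  runtime       = var.runtime
  timeout       = var.timeout
  memory_size   = var.memory_size
  role          = aws_iam_role.this[0].arn # assumed companion IAM role resource

  vpc_config {
    subnet_ids         = var.vpc_settings.subnet_ids
    security_group_ids = var.vpc_settings.security_group_ids
  }

  tracing_config {
    mode = var.tracing # "PassThrough" or "Active"
  }

  tags = var.tags
}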
15 changes: 15 additions & 0 deletions terraform/environments/digital-prison-reporting/locals.tf
@@ -423,4 +423,19 @@ locals {
Name = local.application_name
}
)

# DPR Operations
# S3 Data Migration Lambda
enable_s3_data_migrate_lambda = local.application_data.accounts[local.environment].enable_s3_data_migrate_lambda
lambda_s3_data_migrate_name = "${local.project}-s3-data-lifecycle-migration-lambda"
lambda_s3_data_migrate_code_s3_bucket = module.s3_artifacts_store.bucket_id
lambda_s3_data_migrate_code_s3_key = "build-artifacts/dpr-operations/py_files/dpr-s3-data-lifecycle-migration-lambda-v1.zip"
lambda_s3_data_migrate_handler = "dpr-s3-data-lifecycle-migration-lambda-v1.lambda_handler"
lambda_s3_data_migrate_runtime = "python3.11"
lambda_s3_data_migrate_tracing = "PassThrough"
lambda_s3_data_migrate_policies = [
"arn:aws:iam::${local.account_id}:policy/${local.s3_read_access_policy}",
"arn:aws:iam::${local.account_id}:policy/${local.kms_read_access_policy}",
"arn:aws:iam::${local.account_id}:policy/${local.s3_read_write_policy}"
]
}
15 changes: 8 additions & 7 deletions terraform/environments/digital-prison-reporting/main.tf
@@ -850,12 +850,13 @@ module "s3_structured_bucket" {

# S3 Curated
module "s3_curated_bucket" {
source = "./modules/s3_bucket"
create_s3 = local.setup_buckets
name = "${local.project}-curated-zone-${local.env}"
custom_kms_key = local.s3_kms_arn
create_notification_queue = false # For SQS Queue
enable_lifecycle = true
source = "./modules/s3_bucket"
create_s3 = local.setup_buckets
name = "${local.project}-curated-zone-${local.env}"
custom_kms_key = local.s3_kms_arn
create_notification_queue = false # For SQS Queue
enable_lifecycle = true
enable_intelligent_tiering = false

tags = merge(
local.all_tags,
@@ -866,7 +867,7 @@ module "s3_curated_bucket" {
)
}
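For contrast with the curated bucket above, which keeps Intelligent-Tiering off, a hypothetical caller opting into the new Intelligent-Tiering path instead of the category rules might look like this (the bucket name is illustrative, not part of this change):

# Hypothetical bucket using Intelligent-Tiering instead of the
# category-based lifecycle rules (name is illustrative only).
module "s3_example_bucket" {
  source                     = "./modules/s3_bucket"
  create_s3                  = local.setup_buckets
  name                       = "${local.project}-example-zone-${local.env}"
  custom_kms_key             = local.s3_kms_arn
  create_notification_queue  = false
  enable_lifecycle           = false # skip the short/long-term rules
  enable_intelligent_tiering = true  # transition all objects at day 0

  tags = local.all_tags
}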

# S3 Curated
# S3 Temp Reload
module "s3_temp_reload_bucket" {
source = "./modules/s3_bucket"
create_s3 = local.setup_buckets
@@ -31,78 +31,75 @@ resource "aws_s3_bucket_public_access_block" "storage" {
restrict_public_buckets = true
}

# Resource to define S3 bucket lifecycle configuration
resource "aws_s3_bucket_lifecycle_configuration" "lifecycle" {
#checkov:skip=CKV_AWS_300: "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
# Enable the lifecycle configuration only if the variable `enable_lifecycle` is true
count = var.enable_lifecycle ? 1 : 0
# Create the lifecycle configuration if either lifecycle or Intelligent-Tiering is enabled
count = var.enable_lifecycle || var.enable_intelligent_tiering ? 1 : 0

bucket = aws_s3_bucket.storage[0].id

# Main lifecycle rule for standard categories (short_term, long_term, temporary)
rule {
id = var.name
status = "Enabled"

# Short-Term Retention Policy
# - Transitions objects to STANDARD_IA after 30 days (cost-effective storage for infrequent access).
# - Deletes objects after 90 days.
dynamic "transition" {
for_each = var.lifecycle_category == "short_term" ? [{ days = 30, storage_class = "STANDARD_IA" }] : []
content {
days = transition.value.days
storage_class = transition.value.storage_class
# Main lifecycle rule for standard categories (short_term, long_term, temporary, standard)
dynamic "rule" {
for_each = var.enable_lifecycle ? [1] : []
content {
id = var.name
status = "Enabled"

# Short-Term Retention Policy
dynamic "transition" {
for_each = var.lifecycle_category == "short_term" ? [{ days = 30, storage_class = "STANDARD_IA" }] : []
content {
days = transition.value.days
storage_class = transition.value.storage_class
}
}
}

dynamic "expiration" {
for_each = var.lifecycle_category == "short_term" ? [{ days = 90 }] : (
var.lifecycle_category == "temporary" ? [{ days = 30 }] : [])
content {
days = expiration.value.days
# Standard Retention Policy: Move to STANDARD_IA after 30 days and remain there indefinitely
dynamic "transition" {
for_each = var.lifecycle_category == "standard" ? [{ days = 30, storage_class = "STANDARD_IA" }] : []
content {
days = transition.value.days
storage_class = transition.value.storage_class
}
}

# Expiration logic for short-term and temporary categories
dynamic "expiration" {
for_each = var.lifecycle_category == "short_term" ? [{ days = 90 }] : (
var.lifecycle_category == "temporary" ? [{ days = 30 }] : [])
content {
days = expiration.value.days
}
}
}

# Long-Term Retention Policy
# - Transitions objects to progressively cheaper storage classes:
# - STANDARD_IA after 60 days.
# - GLACIER after 180 days.
# - DEEP_ARCHIVE after 365 days.
# - Does not delete objects (no expiration).
dynamic "transition" {
for_each = var.lifecycle_category == "long_term" ? [
{ days = 60, storage_class = "STANDARD_IA" },
{ days = 180, storage_class = "GLACIER" },
{ days = 365, storage_class = "DEEP_ARCHIVE" }
] : []
content {
days = transition.value.days
storage_class = transition.value.storage_class
# Long-Term Retention Policy
dynamic "transition" {
for_each = var.lifecycle_category == "long_term" ? [
{ days = 30, storage_class = "STANDARD_IA" },
{ days = 180, storage_class = "GLACIER" },
{ days = 365, storage_class = "DEEP_ARCHIVE" }
] : []
content {
days = transition.value.days
storage_class = transition.value.storage_class
}
}
}
}

# Dynamic rule for custom expiration rules
# - Allows adding additional lifecycle policies dynamically using the `override_expiration_rules` variable.
# - Each custom rule is defined with:
# - A unique prefix to filter objects (e.g., "reports/", "dpr/").
# - An expiration time in days for objects under that prefix.
# - The `id` for each rule is derived dynamically based on the prefix (slashes `/` are replaced with dashes `-` for compatibility).
# - Rules are enabled or disabled based on the `enable_lifecycle_expiration` variable.
dynamic "rule" {
for_each = var.override_expiration_rules
content {
# Generate rule ID without worrying about trailing slashes in the prefix
id = "${var.name}-${rule.value.prefix}"
status = var.enable_lifecycle_expiration ? "Enabled" : "Disabled"
# Intelligent-Tiering rule (applied if enable_intelligent_tiering is true)
rule {
id = "${var.name}-intelligent-tiering"
status = var.enable_intelligent_tiering ? "Enabled" : "Disabled"

filter {
# Append '/' directly in the filter block to ensure proper prefix format
prefix = "${rule.value.prefix}/"
}
filter {
# Apply to all objects
prefix = ""
}

expiration {
days = rule.value.days
}
transition {
# Move objects to Intelligent-Tiering storage class
days = 0 # Immediately move to Intelligent-Tiering
storage_class = "INTELLIGENT_TIERING"
}
}
}
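To make the dynamic blocks above concrete: for a bucket named "example-bucket" with enable_lifecycle = true, the default lifecycle_category = "standard", and enable_intelligent_tiering = false, the resource should render roughly as the following flattened configuration (an illustrative sketch, not generated Terraform output):

# Illustrative rendering only; "example-bucket" is a placeholder name.
resource "aws_s3_bucket_lifecycle_configuration" "lifecycle" {
  bucket = aws_s3_bucket.storage[0].id

  # "standard" category: move to STANDARD_IA after 30 days, keep indefinitely.
  rule {
    id     = "example-bucket"
    status = "Enabled"

    transition {
      days          = 30
      storage_class = "STANDARD_IA"
    }
  }

  # The Intelligent-Tiering rule is always emitted but stays disabled here.
  rule {
    id     = "example-bucket-intelligent-tiering"
    status = "Disabled"

    filter {
      prefix = ""
    }

    transition {
      days          = 0
      storage_class = "INTELLIGENT_TIERING"
    }
  }
}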
@@ -1,6 +1,5 @@

variable "name" {
type = string
description = "Name of the Bucket"
default = ""
}
@@ -24,13 +23,11 @@ variable "cloudtrail_access_policy" {
}

variable "s3_notification_name" {
type = string
description = "S3 Notification Event Name"
default = "s3-notification-event"
}

variable "create_s3" {
type = bool
description = "Setup S3 Buckets"
default = false
}
@@ -42,25 +39,21 @@ variable "custom_kms_key" {
}

variable "create_notification_queue" {
type = bool
description = "Setup Notification Queue"
default = false
}

variable "sqs_msg_retention_seconds" {
type = number
description = "SQS Message Retention"
default = 86400
}

variable "filter_prefix" {
type = string
description = "S3 Notification Filter Prefix"
default = null
}

variable "enable_lifecycle" {
type = bool
description = "Enabled Lifecycle for S3 Storage, Default is False"
default = false
}
@@ -81,19 +74,16 @@
#}

variable "enable_versioning_config" {
type = string
description = "Enable Versioning Config for S3 Storage, Default is Disabled"
default = "Disabled"
}

variable "enable_s3_versioning" {
type = bool
description = "Enable Versioning for S3 Bucket, Default is false"
default = false
}

variable "enable_notification" {
type = bool
description = "Enable S3 Bucket Notifications, Default is false"
default = false
}
@@ -121,24 +111,28 @@ variable "dependency_lambda" {
}

variable "bucket_key" {
type = bool
description = "If Bucket Key is Enabled or Disabled"
default = true
}

## Dynamic override_expiration_rules
variable "override_expiration_rules" {
type = list(object({ prefix = string, days = number }))
default = []
}

variable "lifecycle_category" {
type = string
default = "long_term" # Options: "short_term", "long_term", "temporary"
default = "standard" # Options: "short_term", "long_term", "temporary", "standard"
}

variable "enable_lifecycle_expiration" {
type = bool
description = "Enable item expiration - requires 'enable_lifecycle' and 'override_expiration_rules' to be defined/enabled."
default = false
}

variable "enable_intelligent_tiering" {
description = "Enable Intelligent-Tiering storage class for S3 bucket"
type = bool
default = false
}
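As a quick reference for how the four lifecycle_category values map onto the rules in this change, here are hypothetical module calls (names are illustrative and other arguments are left at their defaults):

# Illustrative calls only; bucket names and argument choices are assumptions.
module "short_term_bucket" {
  source             = "./modules/s3_bucket"
  create_s3          = true
  name               = "example-short-term"
  enable_lifecycle   = true
  lifecycle_category = "short_term" # STANDARD_IA at 30 days, expire at 90
}

module "long_term_bucket" {
  source             = "./modules/s3_bucket"
  create_s3          = true
  name               = "example-long-term"
  enable_lifecycle   = true
  lifecycle_category = "long_term" # STANDARD_IA 30d, GLACIER 180d, DEEP_ARCHIVE 365d, no expiry
}

module "temporary_bucket" {
  source             = "./modules/s3_bucket"
  create_s3          = true
  name               = "example-temporary"
  enable_lifecycle   = true
  lifecycle_category = "temporary" # no transition, expire at 30 days
}

module "standard_bucket" {
  source             = "./modules/s3_bucket"
  create_s3          = true
  name               = "example-standard"
  enable_lifecycle   = true
  lifecycle_category = "standard" # STANDARD_IA at 30 days, kept indefinitely
}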
