GitHub Actions Code Formatter workflow #7654

Merged
merged 1 commit on Sep 2, 2024
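
Every hunk in this PR is a whitespace-only re-alignment produced by the formatter run; no values change. As a rough illustration of the effect (assuming the workflow applies terraform-fmt-style alignment — the workflow definition itself is not part of this diff), an argument list such as the apex ECS module call goes from ragged to column-aligned assignments:

# Illustrative sketch only: the argument names come from terraform/environments/apex/ecs.tf
# below, but the exact original spacing is not reproduced byte-for-byte.

# Before formatting:
module "apex-ecs" {
  source = "./modules/ecs"
  subnet_set_name = local.subnet_set_name
  app_name = local.application_name
}

# After formatting (equals signs padded to a common column within the block):
module "apex-ecs" {
  source          = "./modules/ecs"
  subnet_set_name = local.subnet_set_name
  app_name        = local.application_name
}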
2 changes: 1 addition & 1 deletion .devcontainer/devcontainer-lock.json
@@ -21,4 +21,4 @@
"integrity": "sha256:34eb8c510a11fc44abb8173519215fcb6b82715b94e647c69089ee23773c6dc8"
}
}
}
}
22 changes: 11 additions & 11 deletions terraform/environments/apex/ecs.tf
@@ -6,17 +6,17 @@ module "apex-ecs" {

source = "./modules/ecs"

subnet_set_name = local.subnet_set_name
vpc_all = local.vpc_all
app_name = local.application_name
container_instance_type = local.application_data.accounts[local.environment].container_instance_type
instance_type = local.application_data.accounts[local.environment].instance_type
user_data = local.user_data
key_name = local.application_data.accounts[local.environment].key_name
task_definition = local.task_definition
ec2_desired_capacity = local.application_data.accounts[local.environment].ec2_desired_capacity
ec2_max_size = local.application_data.accounts[local.environment].ec2_max_size
ec2_min_size = local.application_data.accounts[local.environment].ec2_min_size
subnet_set_name = local.subnet_set_name
vpc_all = local.vpc_all
app_name = local.application_name
container_instance_type = local.application_data.accounts[local.environment].container_instance_type
instance_type = local.application_data.accounts[local.environment].instance_type
user_data = local.user_data
key_name = local.application_data.accounts[local.environment].key_name
task_definition = local.task_definition
ec2_desired_capacity = local.application_data.accounts[local.environment].ec2_desired_capacity
ec2_max_size = local.application_data.accounts[local.environment].ec2_max_size
ec2_min_size = local.application_data.accounts[local.environment].ec2_min_size
# task_definition_volume = local.application_data.accounts[local.environment].task_definition_volume
# network_mode = local.application_data.accounts[local.environment].network_mode
server_port = local.application_data.accounts[local.environment].server_port
2 changes: 1 addition & 1 deletion terraform/environments/apex/locals.tf
@@ -75,7 +75,7 @@ locals {

env_account_id = local.environment_management.account_ids[terraform.workspace]
app_db_password_name = "APP_APEX_DBPASSWORD_TAD"
db_hostname = "db.${local.application_name}"
db_hostname = "db.${local.application_name}"

database-instance-userdata = <<EOF
#!/bin/bash
10 changes: 5 additions & 5 deletions terraform/environments/apex/modules/ecs/main.tf
@@ -20,11 +20,11 @@ data "aws_subnets" "shared-private" {
}

resource "aws_autoscaling_group" "cluster-scaling-group" {
vpc_zone_identifier = sort(data.aws_subnets.shared-private.ids)
name = "${var.app_name}-cluster-scaling-group"
desired_capacity = var.ec2_desired_capacity
max_size = var.ec2_max_size
min_size = var.ec2_min_size
vpc_zone_identifier = sort(data.aws_subnets.shared-private.ids)
name = "${var.app_name}-cluster-scaling-group"
desired_capacity = var.ec2_desired_capacity
max_size = var.ec2_max_size
min_size = var.ec2_min_size
protect_from_scale_in = true

launch_template {
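
The hunk above stops where the launch_template block opens, so the rest of that resource is not shown here. Purely for orientation, a minimal, hypothetical sketch of how an autoscaling group of this shape typically references a launch template follows; every name and value below is illustrative, not taken from this repository.

# Hypothetical example, not the module's actual configuration.
resource "aws_launch_template" "example" {
  name_prefix   = "example-ecs-"
  image_id      = "ami-0123456789abcdef0" # placeholder AMI
  instance_type = "t3.medium"
}

resource "aws_autoscaling_group" "example" {
  name                  = "example-cluster-scaling-group"
  vpc_zone_identifier   = ["subnet-aaaa1111", "subnet-bbbb2222"] # placeholder subnets
  desired_capacity      = 1
  max_size              = 2
  min_size              = 1
  protect_from_scale_in = true

  # The ASG launches instances from the template; "$Latest" tracks its newest version.
  launch_template {
    id      = aws_launch_template.example.id
    version = "$Latest"
  }
}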
@@ -2,7 +2,7 @@ locals {

baseline_presets_preproduction = {
options = {
cloudwatch_metric_alarms_default_actions = ["pagerduty"]
cloudwatch_metric_alarms_default_actions = ["pagerduty"]
sns_topics = {
pagerduty_integrations = {
pagerduty = "corporate-staff-rostering-preproduction"
@@ -2,8 +2,8 @@ locals {

baseline_presets_production = {
options = {
cloudwatch_metric_alarms_default_actions = ["pagerduty"]
db_backup_lifecycle_rule = "rman_backup_one_month"
cloudwatch_metric_alarms_default_actions = ["pagerduty"]
db_backup_lifecycle_rule = "rman_backup_one_month"
sns_topics = {
pagerduty_integrations = {
pagerduty = "corporate-staff-rostering-production"
@@ -27,14 +27,14 @@ resource "aws_cloudwatch_metric_alarm" "rds_cpu_over_threshold" {
}

resource "aws_cloudwatch_metric_alarm" "rds_memory_over_threshold" {
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-memory-threshold"
alarm_description = "Triggers alarm if RDS Memory crosses a threshold"
namespace = "AWS/RDS"
metric_name = "FreeableMemory"
statistic = "Average"
period = "60"
evaluation_periods = "10"
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-memory-threshold"
alarm_description = "Triggers alarm if RDS Memory crosses a threshold"
namespace = "AWS/RDS"
metric_name = "FreeableMemory"
statistic = "Average"
period = "60"
evaluation_periods = "10"
alarm_actions = [var.sns_topic_arn]
ok_actions = [var.sns_topic_arn]
threshold = "800000000"
@@ -54,14 +54,14 @@ resource "aws_cloudwatch_metric_alarm" "rds_memory_over_threshold" {
}

resource "aws_cloudwatch_metric_alarm" "rds_read_latency_over_threshold" {
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-read-latency-threshold"
alarm_description = "Triggers alarm if RDS read latency crosses a threshold"
namespace = "AWS/RDS"
metric_name = "ReadLatency"
statistic = "Average"
period = "60"
evaluation_periods = "5"
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-read-latency-threshold"
alarm_description = "Triggers alarm if RDS read latency crosses a threshold"
namespace = "AWS/RDS"
metric_name = "ReadLatency"
statistic = "Average"
period = "60"
evaluation_periods = "5"
alarm_actions = [var.sns_topic_arn]
ok_actions = [var.sns_topic_arn]
threshold = "5"
@@ -81,14 +81,14 @@ resource "aws_cloudwatch_metric_alarm" "rds_read_latency_over_threshold" {
}

resource "aws_cloudwatch_metric_alarm" "rds_write_latency_over_threshold" {
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-write-latency-threshold"
alarm_description = "Triggers alarm if RDS write latency crosses a threshold"
namespace = "AWS/RDS"
metric_name = "WriteLatency"
statistic = "Average"
period = "60"
evaluation_periods = "5"
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-write-latency-threshold"
alarm_description = "Triggers alarm if RDS write latency crosses a threshold"
namespace = "AWS/RDS"
metric_name = "WriteLatency"
statistic = "Average"
period = "60"
evaluation_periods = "5"
alarm_actions = [var.sns_topic_arn]
ok_actions = [var.sns_topic_arn]
threshold = "5"
@@ -108,14 +108,14 @@ resource "aws_cloudwatch_metric_alarm" "rds_write_latency_over_threshold" {
}

resource "aws_cloudwatch_metric_alarm" "rds_connections_over_threshold" {
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-db-connections-threshold"
alarm_description = "Triggers alarm if RDS database connections crosses a threshold"
namespace = "AWS/RDS"
metric_name = "DatabaseConnections"
statistic = "Average"
period = "60"
evaluation_periods = "5"
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-db-connections-threshold"
alarm_description = "Triggers alarm if RDS database connections crosses a threshold"
namespace = "AWS/RDS"
metric_name = "DatabaseConnections"
statistic = "Average"
period = "60"
evaluation_periods = "5"
alarm_actions = [var.sns_topic_arn]
ok_actions = [var.sns_topic_arn]
threshold = "100"
@@ -135,14 +135,14 @@ resource "aws_cloudwatch_metric_alarm" "rds_connections_over_threshold" {
}

resource "aws_cloudwatch_metric_alarm" "rds_allocated_storage_queue_depth_over_threshold" {
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-queue-depth-threshold"
alarm_description = "Triggers alarm if RDS database queue depth crosses a threshold"
namespace = "AWS/RDS"
metric_name = "DiskQueueDepth"
statistic = "Average"
period = "300"
evaluation_periods = "5"
count = var.create_rds ? 1 : 0
alarm_name = "${var.name}-rds-queue-depth-threshold"
alarm_description = "Triggers alarm if RDS database queue depth crosses a threshold"
namespace = "AWS/RDS"
metric_name = "DiskQueueDepth"
statistic = "Average"
period = "300"
evaluation_periods = "5"
alarm_actions = [var.sns_topic_arn]
ok_actions = [var.sns_topic_arn]
threshold = "60"
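
Each of the alarm hunks above is truncated before its comparison operator and dimensions. For reference only, a self-contained alarm of the same shape might look like the sketch below; the comparison operator, dimension, and SNS topic ARN are assumptions for illustration and do not come from this diff.

# Hypothetical, complete version of an alarm like rds_memory_over_threshold.
resource "aws_cloudwatch_metric_alarm" "example_rds_memory" {
  alarm_name          = "example-rds-memory-threshold"
  alarm_description   = "Triggers alarm if RDS freeable memory drops below the threshold"
  namespace           = "AWS/RDS"
  metric_name         = "FreeableMemory"
  statistic           = "Average"
  period              = "60"
  evaluation_periods  = "10"
  threshold           = "800000000"                  # bytes, as in the hunk above
  comparison_operator = "LessThanOrEqualToThreshold" # assumed; not visible in the truncated hunk
  dimensions = {
    DBInstanceIdentifier = "example-db-instance" # placeholder dimension
  }
  alarm_actions = ["arn:aws:sns:eu-west-2:123456789012:example-topic"] # placeholder ARN
  ok_actions    = ["arn:aws:sns:eu-west-2:123456789012:example-topic"]
}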
2 changes: 1 addition & 1 deletion terraform/environments/digital-prison-reporting/data.tf
@@ -45,7 +45,7 @@ data "aws_secretsmanager_secret_version" "datamart" {
# AWS _IAM_ Policy
data "aws_iam_policy" "rds_full_access" {
#checkov:skip=CKV_AWS_275:Disallow policies from using the AWS AdministratorAccess policy

arn = "arn:aws:iam::aws:policy/AmazonRDSFullAccess"
}

4 changes: 2 additions & 2 deletions terraform/environments/digital-prison-reporting/kms.tf
@@ -31,7 +31,7 @@ data "aws_iam_policy_document" "s3-kms" {
#checkov:skip=CKV_AWS_49
#checkov:skip=CKV_AWS_108
#checkov:skip=CKV_AWS_110

effect = "Allow"
actions = ["kms:*"]
resources = ["*"]
@@ -190,7 +190,7 @@ resource "aws_kms_key" "operational_db" {
#checkov:skip=CKV_AWS_33
#checkov:skip=CKV_AWS_227
#checkov:skip=CKV_AWS_7

description = "Encryption key for Operational DB"
enable_key_rotation = true
key_usage = "ENCRYPT_DECRYPT"
2 changes: 1 addition & 1 deletion terraform/environments/digital-prison-reporting/main.tf
@@ -97,7 +97,7 @@ module "glue_reporting_hub_job" {
# Glue Job, Reporting Hub Batch
module "glue_reporting_hub_batch_job" {
#checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS, Skipping for Timebeing in view of Cost Savings”

source = "./modules/glue_job"
create_job = local.create_job
name = "${local.project}-reporting-hub-batch-${local.env}"
@@ -72,7 +72,7 @@ resource "aws_api_gateway_method" "this" {
#checkov:skip=CKV_AWS_70:Ensure API gateway method has authorization or API key set
#checkov:skip=CKV2_AWS_53: “Ignoring AWS API gateway request validatation"
#checkov:skip=CCKV_AWS_59: "Ensure there is no open access to back-end resources through API"


authorization = "NONE"
http_method = "ANY"
@@ -92,7 +92,7 @@ resource "aws_api_gateway_integration" "this" {
resource "aws_lambda_permission" "apigw_lambda" {
#checkov:skip=CKV_AWS_364:Ensure that AWS Lambda function permissions delegated to AWS services are limited by SourceArn or SourceAccount
#checkov:skip=CKV_AWS_301:Ensure that AWS Lambda function is not publicly accessible

statement_id = "AllowExecutionFromAPIGateway"
action = "lambda:InvokeFunction"
function_name = var.lambda_name
@@ -1,7 +1,7 @@
# Create a new DMS replication instance
resource "aws_dms_replication_instance" "dms-s3-target-instance" {
#checkov:skip=CKV_AWS_222: "Ensure DMS replication instance gets all minor upgrade automatically"
#checkov:skip=CKV_AWS_212: "Ensure DMS replication instance is encrypted by KMS using a customer managed Key (CMK)"
#checkov:skip=CKV_AWS_222: "Ensure DMS replication instance gets all minor upgrade automatically"
#checkov:skip=CKV_AWS_212: "Ensure DMS replication instance is encrypted by KMS using a customer managed Key (CMK)"

count = var.setup_dms_instance ? 1 : 0

@@ -66,7 +66,7 @@ resource "aws_dms_replication_task" "dms-replication" {
resource "aws_dms_endpoint" "dms-s3-target-source" {
#checkov:skip=CKV2_AWS_49: "Ensure AWS Database Migration Service endpoints have SSL configured - Will resolve through Spike"
#checkov:skip=CKV_AWS_296: "Ensure DMS endpoint uses Customer Managed Key (CMK)"

count = var.setup_dms_instance ? 1 : 0

database_name = var.source_db_name
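
Several of the resources in this diff, including the DMS endpoint above, use the count = var.x ? 1 : 0 idiom to create a resource only when a feature flag is set. A tiny self-contained sketch of that pattern (the variable and bucket names are made up for illustration):

variable "setup_example_bucket" {
  type    = bool
  default = false
}

# With count = 0 the resource is skipped entirely; with count = 1 it is created
# and must then be referenced with an index, e.g. aws_s3_bucket.example[0].id.
resource "aws_s3_bucket" "example" {
  count  = var.setup_example_bucket ? 1 : 0
  bucket = "example-conditional-bucket"
}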
@@ -87,7 +87,7 @@ resource "aws_dms_endpoint" "source" {
# Create an endpoint for the target Kinesis
resource "aws_dms_endpoint" "target" {
#checkov:skip=CKV2_AWS_49: "Ensure AWS Database Migration Service endpoints have SSL configured - Will resolve through Spike"

count = var.setup_dms_instance ? 1 : 0

endpoint_id = "${var.project_id}-dms-${var.short_name}-${var.dms_target_name}-target"
@@ -115,7 +115,7 @@ resource "aws_dms_replication_task" "dms-replication" {
# Create an endpoint for the source database
resource "aws_dms_endpoint" "dms-s3-target-source" {
#checkov:skip=CKV2_AWS_49: "Ensure AWS Database Migration Service endpoints have SSL configured - Will resolve through Spike"

count = var.setup_dms_endpoints && var.setup_dms_source_endpoint ? 1 : 0

database_name = var.source_db_name
@@ -242,7 +242,7 @@ resource "aws_cloudwatch_log_group" "sec_config_output" {

resource "aws_cloudwatch_log_group" "continuous_log" {
#checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS, Skipping for Timebeing in view of Cost Savings”

count = var.create_job ? 1 : 0

name = "/aws-glue/jobs/${var.name}-${var.short_name}-sec-config"
@@ -1,6 +1,6 @@
resource "aws_cloudwatch_log_group" "this" {
#checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS, Skipping for Timebeing in view of Cost Savings”

count = var.enable_lambda ? 1 : 0
name = "/aws/lambda/${var.name}-function"

@@ -419,7 +419,7 @@ resource "aws_db_parameter_group" "this" {
# Log groups will not be created if using a cluster identifier prefix
resource "aws_cloudwatch_log_group" "this" {
#checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS, Skipping for Timebeing in view of Cost Savings”

for_each = toset([for log in var.enabled_cloudwatch_logs_exports : log if local.create && var.create_cloudwatch_log_group && !var.cluster_use_name_prefix])

name = "/aws/rds/cluster/${var.name}/${each.value}"
@@ -1,6 +1,6 @@
resource "aws_secretsmanager_secret" "password" {
#checkov:skip=CKV2_AWS_57: “Ignore - Ensure Secrets Manager secrets should have automatic rotation enabled"

name = "${var.name}-password"
}

Expand Down Expand Up @@ -38,7 +38,7 @@ resource "aws_db_subnet_group" "subnets" {
resource "aws_db_instance" "default" {
#checkov:skip=CKV2_AWS_30:”Query Logging is not required"
#checkov:skip=CKV2_AWS_60: “Ignore -Ensure RDS instance with copy tags to snapshots is enabled"

count = var.enable_rds ? 1 : 0
identifier = var.name
db_name = var.db_name
@@ -17,7 +17,7 @@ resource "random_string" "unique_suffix" {

resource "aws_secretsmanager_secret" "redshift_connection" {
#checkov:skip=CKV2_AWS_57: “Ignore - Ensure Secrets Manager secrets should have automatic rotation enabled"

description = "Redshift connect details"
name = "${var.project_id}-redshift-secret-${var.env}"
}
@@ -9,7 +9,7 @@ resource "aws_s3_bucket" "storage" { # TBC "application_tf_state" should be gene
#checkov:skip=CKV_AWS_144
#checkov:skip=CKV2_AWS_6
#checkov:skip=CKV_AWS_21:”Not all S3 bucket requires versioning enabaled"

bucket = var.name

lifecycle {
@@ -18,7 +18,7 @@ resource "random_password" "random_string" {

resource "aws_secretsmanager_secret" "secret" {
#checkov:skip=CKV2_AWS_57: “Ignore - Ensure Secrets Manager secrets should have automatic rotation enabled"

name = var.name == "" ? null : var.name
name_prefix = var.name == "" ? var.name_prefix : null
description = var.description
@@ -135,7 +135,7 @@ resource "aws_glue_connection" "glue_operational_datastore_connection" {

resource "aws_security_group" "glue_operational_datastore_connection_sg" {
#checkov:skip=CKV2_AWS_5

name = "${local.project}-operational-datastore-connection_sg"
description = "Security group to allow glue access to Operational Datastore via JDBC Connection"
vpc_id = data.aws_vpc.shared.id
@@ -15,7 +15,7 @@ terraform {
random = {
version = "~> 3.0"
source = "hashicorp/random"
}
}
}
required_version = "~> 1.0"
}
4 changes: 2 additions & 2 deletions terraform/environments/digital-prison-reporting/policy.tf
@@ -309,7 +309,7 @@ data "aws_iam_policy_document" "redshift-additional-policy" {
#checkov:skip=CKV_AWS_356: "Ensure no IAM policies documents allow "*" as a statement's resource for restrictable actions"
#checkov:skip=CKV_AWS_111: "Ensure IAM policies does not allow write access without constraints"
#checkov:skip=CKV_AWS_110: "Ensure IAM policies does not allow privilege escalation"

statement {
actions = [
"glue:*"
@@ -540,7 +540,7 @@ data "aws_iam_policy_document" "domain_builder_preview" {
}

resource "aws_iam_policy" "domain_builder_preview_policy" {

name = "${local.project}-domain-builder-preview-policy"
description = "Additional policy to allow execution of query previews in Athena"
policy = data.aws_iam_policy_document.domain_builder_preview.json