diff --git a/README.md b/README.md
index c60518d8..7bf5be19 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ Includes support for:
* [NIST 800-53 Revision 5](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.nist_800_53_rev_5)
* [NIST Cybersecurity Framework (CSF)](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.nist_csf)
* [Other Compliance Checks](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.other)
-* [PCI DSS v3.2.1](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.pci_v321)
+* [PCI DSS v3.2.1](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.pci_dss_v321)
* [Reserve Bank of India (RBI) Cyber Security Framework](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.rbi_cyber_security)
* [SOC 2](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/benchmark.soc_2)
diff --git a/conformance_pack/acm.sp b/conformance_pack/acm.sp
index 061a34ce..14d8db48 100644
--- a/conformance_pack/acm.sp
+++ b/conformance_pack/acm.sp
@@ -20,6 +20,7 @@ control "acm_certificate_expires_30_days" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/apigateway.sp b/conformance_pack/apigateway.sp
index 6f17eba7..047be18a 100644
--- a/conformance_pack/apigateway.sp
+++ b/conformance_pack/apigateway.sp
@@ -19,6 +19,7 @@ control "apigateway_stage_cache_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -41,6 +42,7 @@ control "apigateway_stage_logging_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -73,6 +75,7 @@ control "apigateway_stage_use_waf_web_acl" {
fedramp_moderate_rev_4 = "true"
ffiec = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -119,7 +122,8 @@ query "apigateway_stage_logging_enabled" {
title,
region,
account_id,
- tags
+ tags,
+ _ctx
from
aws_api_gateway_stage
union
@@ -130,7 +134,8 @@ query "apigateway_stage_logging_enabled" {
title,
region,
account_id,
- tags
+ tags,
+ _ctx
from
aws_api_gatewayv2_stage
)
@@ -201,7 +206,6 @@ query "apigateway_rest_api_authorizers_configured" {
when jsonb_array_length(a.provider_arns) > 0 then p.name || ' authorizers configured.'
else p.name || ' authorizers not configured.'
end as reason
-
${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "p.")}
${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "p.")}
from
diff --git a/conformance_pack/autoscaling.sp b/conformance_pack/autoscaling.sp
index 4f993d4b..f4b06ae6 100644
--- a/conformance_pack/autoscaling.sp
+++ b/conformance_pack/autoscaling.sp
@@ -4,6 +4,16 @@ locals {
})
}
+control "autoscaling_launch_config_requires_imdsv2" {
+ title = "Auto Scaling group should configure EC2 instances to require Instance Metadata Service Version 2 (IMDSv2)"
+ description = "This control checks whether IMDSv2 is enabled on all instances launched by Amazon EC2 Auto Scaling groups. The control fails if the Instance Metadata Service (IMDS) version is not included in the launch configuration or if both IMDSv1 and IMDSv2 are enabled."
+ query = query.autoscaling_launch_config_requires_imdsv2
+
+ tags = merge(local.conformance_pack_autoscaling_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "autoscaling_group_with_lb_use_health_check" {
title = "Auto Scaling groups with a load balancer should use health checks"
description = "The Elastic Load Balancer (ELB) health checks for Amazon Elastic Compute Cloud (Amazon EC2) Auto Scaling groups support maintenance of adequate capacity and availability."
@@ -19,6 +29,7 @@ control "autoscaling_group_with_lb_use_health_check" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
})
}
@@ -34,6 +45,7 @@ control "autoscaling_launch_config_public_ip_disabled" {
fedramp_moderate_rev_4 = "true"
ffiec = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
diff --git a/conformance_pack/backup.sp b/conformance_pack/backup.sp
index 05ea37fa..c67cd245 100644
--- a/conformance_pack/backup.sp
+++ b/conformance_pack/backup.sp
@@ -16,6 +16,7 @@ control "backup_recovery_point_manual_deletion_disabled" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -34,6 +35,7 @@ control "backup_plan_min_retention_35_days" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -42,6 +44,7 @@ control "backup_recovery_point_encryption_enabled" {
title = "Backup recovery points should be encrypted"
description = "Ensure if a recovery point is encrypted. The rule is non compliant if the recovery point is not encrypted."
query = query.backup_recovery_point_encryption_enabled
+
tags = merge(local.conformance_pack_backup_common_tags, {
cisa_cyber_essentials = "true"
ffiec = "true"
@@ -49,6 +52,7 @@ control "backup_recovery_point_encryption_enabled" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -63,6 +67,7 @@ control "backup_recovery_point_min_retention_35_days" {
ffiec = "true"
gxp_eu_annex_11 = "true"
nist_800_171_rev_2 = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/cloudformation.sp b/conformance_pack/cloudformation.sp
index 491d112f..06710dac 100644
--- a/conformance_pack/cloudformation.sp
+++ b/conformance_pack/cloudformation.sp
@@ -4,6 +4,16 @@ locals {
})
}
+control "cloudformation_stack_drift_detection_check" {
+ title = "CloudFormation stacks differ from the expected configuration"
+ description = "Ensure if the actual configuration of a Cloud Formation stack differs, or has drifted, from the expected configuration, a stack is considered to have drifted if one or more of its resources differ from their expected configuration."
+ query = query.cloudformation_stack_drift_detection_check
+
+ tags = merge(local.conformance_pack_cloudformation_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "cloudformation_stack_output_no_secrets" {
title = "CloudFormation stacks outputs should not have any secrets"
description = "Ensure CloudFormation stacks outputs do not contain secrets like user names, passwords, and tokens. It is recommended to remove secrets since outputs cannot be encrypted resulting in any entity with basic read-metadata-only and access to CloudFormation outputs having access to these secrets."
@@ -44,6 +54,27 @@ control "cloudformation_stack_termination_protection_enabled" {
})
}
+query "cloudformation_stack_drift_detection_check" {
+ sql = <<-EOQ
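+ -- A stack is DRIFTED when one or more of its resources differ from the expected configuration; statuses other than IN_SYNC and DRIFTED (e.g. NOT_CHECKED) are skipped.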
+ select
+ id as resource,
+ case
+ when stack_drift_status = 'IN_SYNC' then 'ok'
+ when stack_drift_status = 'DRIFTED' then 'alarm'
+ else 'skip'
+ end as status,
+ title || ' drift status is ' || stack_drift_status || '.' as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_cloudformation_stack;
+ EOQ
+}
+
query "cloudformation_stack_output_no_secrets" {
sql = <<-EOQ
with stack_output as (
diff --git a/conformance_pack/cloudfront.sp b/conformance_pack/cloudfront.sp
index fac19dc4..4c8f0983 100644
--- a/conformance_pack/cloudfront.sp
+++ b/conformance_pack/cloudfront.sp
@@ -10,9 +10,10 @@ control "cloudfront_distribution_encryption_in_transit_enabled" {
query = query.cloudfront_distribution_encryption_in_transit_enabled
tags = merge(local.conformance_pack_cloudfront_common_tags, {
- gdpr = "true"
- hipaa = "true"
- soc_2 = "true"
+ gdpr = "true"
+ hipaa = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
@@ -46,6 +47,26 @@ control "cloudfront_distribution_non_s3_origins_encryption_in_transit_enabled" {
})
}
+control "cloudfront_distribution_no_deprecated_ssl_protocol" {
+ title = "CloudFront distributions should not use deprecated SSL protocols between edge locations and custom origins"
+ description = "This control checks if Amazon CloudFront distributions are using deprecated SSL protocols for HTTPS communication between CloudFront edge locations and your custom origins. This control fails if a CloudFront distribution has a CustomOriginConfig where OriginSslProtocols includes SSLv3."
+ query = query.cloudfront_distribution_no_deprecated_ssl_protocol
+
+ tags = merge(local.conformance_pack_cloudfront_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "cloudfront_distribution_custom_origins_encryption_in_transit_enabled" {
+ title = "CloudFront distributions should encrypt traffic to custom origins"
+ description = "This control checks if Amazon CloudFront distributions are encrypting traffic to custom origins. This control fails for a CloudFront distribution whose origin protocol policy allows 'http-only'. This control also fails if the distribution's origin protocol policy is 'match-viewer' while the viewer protocol policy is 'allow-all'."
+ query = query.cloudfront_distribution_custom_origins_encryption_in_transit_enabled
+
+ tags = merge(local.conformance_pack_cloudfront_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "cloudfront_distribution_logging_enabled" {
title = "CloudFront distributions access logs should be enabled"
description = "This control checks if Amazon CloudFront distributions are configured to capture information from Amazon Simple Storage Service (Amazon S3) server access logs. This rule is non compliant if a CloudFront distribution does not have logging configured."
diff --git a/conformance_pack/cloudtrail.sp b/conformance_pack/cloudtrail.sp
index 244d052f..f2cd3bb1 100644
--- a/conformance_pack/cloudtrail.sp
+++ b/conformance_pack/cloudtrail.sp
@@ -22,6 +22,7 @@ control "cloudtrail_trail_integrated_with_logs" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -46,6 +47,7 @@ control "cloudtrail_s3_data_events_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -69,6 +71,7 @@ control "cloudtrail_trail_logs_encrypted_with_kms_cmk" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -92,6 +95,7 @@ control "cloudtrail_multi_region_trail_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -113,6 +117,7 @@ control "cloudtrail_trail_validation_enabled" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -135,6 +140,7 @@ control "cloudtrail_trail_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -228,7 +234,7 @@ query "cloudtrail_s3_data_events_enabled" {
aws_s3_bucket as b
left join s3_selectors on bucket_selector like (b.arn || '%') or bucket_selector = 'arn:aws:s3'
group by
- b.account_id, b.region, b.arn, b.name, b.tags;
+ b.account_id, b.region, b.arn, b.name, b.tags, b._ctx;
EOQ
}
diff --git a/conformance_pack/cloudwatch.sp b/conformance_pack/cloudwatch.sp
index 8af55c82..1a542257 100644
--- a/conformance_pack/cloudwatch.sp
+++ b/conformance_pack/cloudwatch.sp
@@ -18,6 +18,7 @@ control "cloudwatch_alarm_action_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -49,6 +50,7 @@ control "log_group_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -69,6 +71,7 @@ control "cloudwatch_log_group_retention_period_365" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -115,8 +118,9 @@ control "log_metric_filter_iam_policy" {
query = query.log_metric_filter_iam_policy
tags = merge(local.conformance_pack_cloudwatch_common_tags, {
- gdpr = "true"
- nist_csf = "true"
+ gdpr = "true"
+ nist_csf = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/codebuild.sp b/conformance_pack/codebuild.sp
index 1d5883ba..053f6213 100644
--- a/conformance_pack/codebuild.sp
+++ b/conformance_pack/codebuild.sp
@@ -9,7 +9,7 @@ control "codebuild_project_build_greater_then_90_days" {
description = "Ensure CodeBuild projects are curently in use. It is recommended to remove the stale ones."
query = query.codebuild_project_build_greater_then_90_days
- tags = merge(local.conformance_pack_ecs_common_tags, {
+ tags = merge(local.conformance_pack_codebuild_common_tags, {
other_checks = "true"
})
}
@@ -20,6 +20,7 @@ control "codebuild_project_plaintext_env_variables_no_sensitive_aws_values" {
query = query.codebuild_project_plaintext_env_variables_no_sensitive_aws_values
tags = merge(local.conformance_pack_codebuild_common_tags, {
+ pci_dss_v321 = "true"
cis_controls_v8_ig1 = "true"
cisa_cyber_essentials = "true"
fedramp_low_rev_4 = "true"
@@ -38,6 +39,7 @@ control "codebuild_project_source_repo_oauth_configured" {
query = query.codebuild_project_source_repo_oauth_configured
tags = merge(local.conformance_pack_codebuild_common_tags, {
+ pci_dss_v321 = "true"
cis_controls_v8_ig1 = "true"
cisa_cyber_essentials = "true"
fedramp_low_rev_4 = "true"
@@ -55,13 +57,22 @@ control "codebuild_project_with_user_controlled_buildspec" {
description = "This control checks if buildspec.yml is used from a trusted source which user cant interfere with."
query = query.codebuild_project_with_user_controlled_buildspec
- tags = merge(local.conformance_pack_ecs_common_tags, {
+ tags = merge(local.conformance_pack_codebuild_common_tags, {
other_checks = "true"
})
}
+control "codebuild_project_environment_privileged_mode_disabled" {
+ title = "CodeBuild project environments should not have privileged mode enabled"
+ description = "This control checks if an AWS CodeBuild project environment has privileged mode enabled. This control fails when an AWS CodeBuild project environment has privileged mode enabled."
+ query = query.codebuild_project_environment_privileged_mode_disabled
+ tags = merge(local.conformance_pack_codebuild_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "codebuild_project_logging_enabled" {
- title = "CodeBuild project logging should be enabled"
+ title = "CodeBuild projects should have logging enabled"
description = "This control checks if an AWS CodeBuild project environment has at least one log option enabled. The rule is non compliant if the status of all present log configurations is set to 'DISABLED'."
query = query.codebuild_project_logging_enabled
diff --git a/conformance_pack/codedeploy.sp b/conformance_pack/codedeploy.sp
new file mode 100644
index 00000000..1b32e8f4
--- /dev/null
+++ b/conformance_pack/codedeploy.sp
@@ -0,0 +1,38 @@
+locals {
+ conformance_pack_codedeploy_common_tags = merge(local.aws_compliance_common_tags, {
+ service = "AWS/CodeDeploy"
+ })
+}
+
+control "codedeploy_deployment_group_lambda_allatonce_traffic_shift_disabled" {
+ title = "Codedeploy deployment groups lambda allatonce traffic shift should be disabled"
+ description = "This control checks if the deployment group for Lambda Compute Platform is not using the default deployment configuration. The rule is non compliant if the deployment group is using the deployment configuration 'CodeDeployDefault.LambdaAllAtOnce'."
+ query = query.codedeploy_deployment_group_lambda_allatonce_traffic_shift_disabled
+
+ tags = merge(local.conformance_pack_codedeploy_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+query "codedeploy_deployment_group_lambda_allatonce_traffic_shift_disabled" {
+ sql = <<-EOQ
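+ -- Join each deployment group to its application to determine the compute platform; only Lambda groups are evaluated, others are skipped.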
+ select
+ g.arn as resource,
+ case
+ when a.compute_platform <> 'Lambda' then 'skip'
+ when deployment_config_name = 'CodeDeployDefault.LambdaAllAtOnce' then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when a.compute_platform <> 'Lambda' then g.title || ' using ' || a.compute_platform || ' compute platform.'
+ else g.title || ' using ' || deployment_config_name || ' deployment config.'
+ end as reason
+ ${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "g.")}
+ ${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "g.")}
+ from
+ aws_codedeploy_deployment_group as g,
+ aws_codedeploy_app as a
+ where
+ g.application_name = a.application_name;
+ EOQ
+}
diff --git a/conformance_pack/config.sp b/conformance_pack/config.sp
index a1373a78..fadac7f4 100644
--- a/conformance_pack/config.sp
+++ b/conformance_pack/config.sp
@@ -14,6 +14,7 @@ control "config_enabled_all_regions" {
gxp_eu_annex_11 = "true"
hipaa = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -75,4 +76,4 @@ query "config_enabled_all_regions" {
aws_region as a
left join aws_config_configuration_recorder as r on r.account_id = a.account_id and r.region = a.name;
EOQ
-}
\ No newline at end of file
+}
diff --git a/conformance_pack/dax.sp b/conformance_pack/dax.sp
index 3523701b..bc7bb036 100644
--- a/conformance_pack/dax.sp
+++ b/conformance_pack/dax.sp
@@ -13,6 +13,7 @@ control "dax_cluster_encryption_at_rest_enabled" {
gdpr = "true"
gxp_eu_annex_11 = "true"
hipaa = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/dms.sp b/conformance_pack/dms.sp
index aca422dc..c1b3ed15 100644
--- a/conformance_pack/dms.sp
+++ b/conformance_pack/dms.sp
@@ -21,6 +21,7 @@ control "dms_replication_instance_not_publicly_accessible" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/dynamodb.sp b/conformance_pack/dynamodb.sp
index fecc8ff3..08717adc 100644
--- a/conformance_pack/dynamodb.sp
+++ b/conformance_pack/dynamodb.sp
@@ -41,6 +41,7 @@ control "dynamodb_table_point_in_time_recovery_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -60,6 +61,7 @@ control "dynamodb_table_encrypted_with_kms" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -80,6 +82,7 @@ control "dynamodb_table_in_backup_plan" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -94,6 +97,7 @@ control "dynamodb_table_encryption_enabled" {
gdpr = "true"
gxp_eu_annex_11 = "true"
hipaa = "true"
+ pci_dss_v321 = "true"
})
}
@@ -110,6 +114,7 @@ control "dynamodb_table_protected_by_backup_plan" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
diff --git a/conformance_pack/ebs.sp b/conformance_pack/ebs.sp
index 0347a639..851476d3 100644
--- a/conformance_pack/ebs.sp
+++ b/conformance_pack/ebs.sp
@@ -21,6 +21,7 @@ control "ebs_snapshot_not_publicly_restorable" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -61,6 +62,7 @@ control "ebs_attached_volume_encryption_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -81,6 +83,7 @@ control "ebs_volume_in_backup_plan" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -114,6 +117,7 @@ control "ebs_volume_protected_by_backup_plan" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
diff --git a/conformance_pack/ec2.sp b/conformance_pack/ec2.sp
index 56853a90..438dbe8d 100644
--- a/conformance_pack/ec2.sp
+++ b/conformance_pack/ec2.sp
@@ -18,6 +18,7 @@ control "ec2_ebs_default_encryption_enabled" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -54,6 +55,7 @@ control "ec2_instance_in_vpc" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -76,6 +78,7 @@ control "ec2_instance_not_publicly_accessible" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -127,6 +130,7 @@ control "ec2_instance_ebs_optimized" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -160,6 +164,7 @@ control "ec2_instance_protected_by_backup_plan" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
diff --git a/conformance_pack/efs.sp b/conformance_pack/efs.sp
index 9e69ee06..7195c39e 100644
--- a/conformance_pack/efs.sp
+++ b/conformance_pack/efs.sp
@@ -15,10 +15,10 @@ control "efs_file_system_encrypt_data_at_rest" {
gxp_21_cfr_part_11 = "true"
gxp_eu_annex_11 = "true"
hipaa = "true"
+ nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
- nist_800_171_rev_2 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -39,6 +39,7 @@ control "efs_file_system_in_backup_plan" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -57,6 +58,7 @@ control "efs_file_system_protected_by_backup_plan" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -69,6 +71,7 @@ control "efs_file_system_encrypted_with_cmk" {
tags = merge(local.conformance_pack_efs_common_tags, {
cisa_cyber_essentials = "true"
other_checks = "true"
+ pci_dss_v321 = "true"
})
}
@@ -82,6 +85,16 @@ control "efs_file_system_enforces_ssl" {
})
}
+control "efs_access_point_enforce_user_identity" {
+ title = "EFS access points should enforce a user identity"
+ description = "This control checks whether Amazon EFS access points are configured to enforce a user identity. This control fails if a POSIX user identity is not defined while creating the EFS access point."
+ query = query.efs_access_point_enforce_user_identity
+
+ tags = merge(local.conformance_pack_efs_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
query "efs_file_system_encrypt_data_at_rest" {
sql = <<-EOQ
select
diff --git a/conformance_pack/eks.sp b/conformance_pack/eks.sp
index b87db856..f6969c73 100644
--- a/conformance_pack/eks.sp
+++ b/conformance_pack/eks.sp
@@ -12,6 +12,7 @@ control "eks_cluster_secrets_encrypted" {
tags = merge(local.conformance_pack_eks_common_tags, {
gxp_eu_annex_11 = "true"
hipaa = "true"
+ pci_dss_v321 = "true"
})
}
@@ -24,6 +25,7 @@ control "eks_cluster_endpoint_restrict_public_access" {
cis_controls_v8_ig1 = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
})
}
@@ -47,6 +49,16 @@ control "eks_cluster_no_default_vpc" {
})
}
+control "eks_cluster_with_latest_kubernetes_version" {
+ title = "EKS clusters should run on a supported Kubernetes version"
+ description = "This control checks whether an Amazon EKS cluster is running on a supported Kubernetes version. The control fails if the EKS cluster is running on an unsupported version. If your application doesn't require a specific version of Kubernetes, we recommend that you use the latest available Kubernetes version that's supported by EKS for your clusters."
+ query = query.eks_cluster_with_latest_kubernetes_version
+
+ tags = merge(local.conformance_pack_eks_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
query "eks_cluster_secrets_encrypted" {
sql = <<-EOQ
with eks_secrets_encrypted as (
diff --git a/conformance_pack/elasticache.sp b/conformance_pack/elasticache.sp
index 58c79869..bf9f150b 100644
--- a/conformance_pack/elasticache.sp
+++ b/conformance_pack/elasticache.sp
@@ -21,6 +21,7 @@ control "elasticache_redis_cluster_automatic_backup_retention_15_days" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -43,4 +44,4 @@ query "elasticache_redis_cluster_automatic_backup_retention_15_days" {
from
aws_elasticache_replication_group;
EOQ
-}
\ No newline at end of file
+}
diff --git a/conformance_pack/elb.sp b/conformance_pack/elb.sp
index c0e93fbd..5a74f5d4 100644
--- a/conformance_pack/elb.sp
+++ b/conformance_pack/elb.sp
@@ -22,6 +22,7 @@ control "elb_application_classic_lb_logging_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -62,6 +63,7 @@ control "elb_application_lb_redirect_http_request_to_https" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -77,10 +79,11 @@ control "elb_application_lb_waf_enabled" {
fedramp_low_rev_4 = "true"
fedramp_moderate_rev_4 = "true"
ffiec = "true"
- nist_800_53_rev_4 = "true"
nist_800_171_rev_2 = "true"
+ nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -99,10 +102,11 @@ control "elb_classic_lb_use_ssl_certificate" {
gdpr = "true"
gxp_21_cfr_part_11 = "true"
hipaa = "true"
- nist_800_53_rev_4 = "true"
nist_800_171_rev_2 = "true"
+ nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -115,10 +119,11 @@ control "elb_application_lb_drop_http_headers" {
tags = merge(local.conformance_pack_elb_common_tags, {
fedramp_low_rev_4 = "true"
- hipaa = "true"
gdpr = "true"
+ hipaa = "true"
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -140,6 +145,7 @@ control "elb_classic_lb_use_tls_https_listeners" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -176,6 +182,7 @@ control "elb_application_network_lb_use_ssl_certificate" {
gxp_21_cfr_part_11 = "true"
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -187,6 +194,7 @@ control "elb_listener_use_secure_ssl_cipher" {
tags = merge(local.conformance_pack_elb_common_tags, {
other_checks = "true"
+ pci_dss_v321 = "true"
})
}
@@ -260,6 +268,27 @@ control "elb_tls_listener_protocol_version" {
})
}
+control "elb_application_lb_desync_mitigation_mode" {
+ title = "ELB application load balancers should be configured with defensive or strictest desync mitigation mode"
+ description = "This control checks whether an Application Load Balancer is configured with defensive or strictest desync mitigation mode. The control fails if an Application Load Balancer is not configured with defensive or strictest desync mitigation mode."
+ query = query.elb_application_lb_desync_mitigation_mode
+
+ tags = merge(local.conformance_pack_elb_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "elb_classic_lb_desync_mitigation_mode" {
+ title = "ELB classic load balancers should be configured with defensive or strictest desync mitigation mode"
+ description = "This control checks whether a Classic Load Balancer is configured with defensive or strictest desync mitigation mode. This control will fail if the Classic Load Balancer is not configured with defensive or strictest desync mitigation mode."
+ query = query.elb_classic_lb_desync_mitigation_mode
+
+ tags = merge(local.conformance_pack_elb_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
query "elb_application_classic_lb_logging_enabled" {
sql = <<-EOQ
(
diff --git a/conformance_pack/emr.sp b/conformance_pack/emr.sp
index a627c8d8..892d1303 100644
--- a/conformance_pack/emr.sp
+++ b/conformance_pack/emr.sp
@@ -48,6 +48,7 @@ control "emr_cluster_master_nodes_no_public_ip" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/es.sp b/conformance_pack/es.sp
index b305ecd2..20767810 100644
--- a/conformance_pack/es.sp
+++ b/conformance_pack/es.sp
@@ -21,6 +21,7 @@ control "es_domain_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -43,6 +44,7 @@ control "es_domain_in_vpc" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -61,9 +63,10 @@ control "es_domain_node_to_node_encryption_enabled" {
gdpr = "true"
gxp_21_cfr_part_11 = "true"
hipaa = "true"
+ nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
- nist_800_171_rev_2 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -82,6 +85,7 @@ control "es_domain_logs_to_cloudwatch" {
ffiec = "true"
gxp_21_cfr_part_11 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
diff --git a/conformance_pack/fsx.sp b/conformance_pack/fsx.sp
index 8f4e7bd3..b3d5c3b3 100644
--- a/conformance_pack/fsx.sp
+++ b/conformance_pack/fsx.sp
@@ -16,6 +16,7 @@ control "fsx_file_system_protected_by_backup_plan" {
gxp_eu_annex_11 = "true"
hipaa = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
diff --git a/conformance_pack/glue.sp b/conformance_pack/glue.sp
index ff33a1ea..5ebe4448 100644
--- a/conformance_pack/glue.sp
+++ b/conformance_pack/glue.sp
@@ -9,7 +9,7 @@ control "glue_dev_endpoint_cloudwatch_logs_encryption_enabled" {
description = "Ensure Glue dev endpoints have CloudWatch logs encryption enabled to protect sensitive information at rest."
query = query.glue_dev_endpoint_cloudwatch_logs_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
@@ -19,7 +19,7 @@ control "glue_dev_endpoint_job_bookmarks_encryption_enabled" {
description = "Ensure Glue dev endpoints have job bookmark encryption enabled to protect sensitive information at rest."
query = query.glue_dev_endpoint_job_bookmark_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
@@ -29,7 +29,7 @@ control "glue_dev_endpoint_s3_encryption_enabled" {
description = "Ensure Glue dev endpoints have S3 encryption enabled to protect sensitive information at rest."
query = query.glue_dev_endpoint_s3_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
@@ -39,7 +39,7 @@ control "glue_job_cloudwatch_logs_encryption_enabled" {
description = "Ensure Glue jobs have CloudWatch logs encryption enabled to protect sensitive information at rest."
query = query.glue_job_cloudwatch_logs_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
@@ -49,7 +49,7 @@ control "glue_job_bookmarks_encryption_enabled" {
description = "Ensure Glue job bookmarks have encryption enabled to protect sensitive information at rest."
query = query.glue_job_bookmarks_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
@@ -59,7 +59,7 @@ control "glue_job_s3_encryption_enabled" {
description = "Ensure Glue jobs have S3 encryption enabled to protect sensitive information at rest."
query = query.glue_job_s3_encryption_enabled
- tags = merge(local.conformance_pack_fsx_common_tags, {
+ tags = merge(local.conformance_pack_glue_common_tags, {
other_checks = "true"
})
}
diff --git a/conformance_pack/guardduty.sp b/conformance_pack/guardduty.sp
index ff25ccb4..15992c8d 100644
--- a/conformance_pack/guardduty.sp
+++ b/conformance_pack/guardduty.sp
@@ -21,6 +21,7 @@ control "guardduty_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -39,6 +40,7 @@ control "guardduty_finding_archived" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/iam.sp b/conformance_pack/iam.sp
index d6abed42..482de3a1 100644
--- a/conformance_pack/iam.sp
+++ b/conformance_pack/iam.sp
@@ -12,6 +12,7 @@ control "iam_account_password_policy_strong_min_reuse_24" {
tags = merge(local.conformance_pack_iam_common_tags, {
hipaa = "true"
nist_800_53_rev_4 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -53,6 +54,7 @@ control "iam_policy_no_star_star" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -76,6 +78,7 @@ control "iam_root_user_no_access_keys" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -99,6 +102,7 @@ control "iam_root_user_hardware_mfa_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -163,6 +167,7 @@ control "iam_user_console_access_mfa_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
})
}
@@ -183,6 +188,7 @@ control "iam_user_mfa_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
})
}
@@ -204,6 +210,7 @@ control "iam_user_no_inline_attached_policies" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -226,6 +233,7 @@ control "iam_user_unused_credentials_90" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -395,6 +403,7 @@ control "iam_all_policy_no_service_wild_card" {
ffiec = "true"
gxp_21_cfr_part_11 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -410,6 +419,7 @@ control "iam_policy_custom_no_blocked_kms_actions" {
ffiec = "true"
gxp_21_cfr_part_11 = "true"
nist_800_171_rev_2 = "true"
+ pci_dss_v321 = "true"
})
}
@@ -421,6 +431,7 @@ control "iam_policy_inline_no_blocked_kms_actions" {
tags = merge(local.conformance_pack_iam_common_tags, {
cisa_cyber_essentials = "true"
gxp_21_cfr_part_11 = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/kinesis.sp b/conformance_pack/kinesis.sp
index 7b3635a8..b7c6ab60 100644
--- a/conformance_pack/kinesis.sp
+++ b/conformance_pack/kinesis.sp
@@ -11,6 +11,7 @@ control "kinesis_stream_server_side_encryption_enabled" {
tags = merge(local.conformance_pack_kinesis_common_tags, {
other_checks = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/kms.sp b/conformance_pack/kms.sp
index eca1d7df..a1a6d2e4 100644
--- a/conformance_pack/kms.sp
+++ b/conformance_pack/kms.sp
@@ -48,7 +48,8 @@ control "kms_key_decryption_restricted_in_iam_customer_managed_policy" {
query = query.kms_key_decryption_restricted_in_iam_customer_managed_policy
tags = merge(local.conformance_pack_kms_common_tags, {
- hipaa = "true"
+ hipaa = "true"
+ pci_dss_v321 = "true"
})
}
@@ -58,7 +59,8 @@ control "kms_key_decryption_restricted_in_iam_inline_policy" {
query = query.kms_key_decryption_restricted_in_iam_inline_policy
tags = merge(local.conformance_pack_kms_common_tags, {
- hipaa = "true"
+ hipaa = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/lambda.sp b/conformance_pack/lambda.sp
index 3e30072f..89e023cc 100644
--- a/conformance_pack/lambda.sp
+++ b/conformance_pack/lambda.sp
@@ -14,9 +14,9 @@ control "lambda_function_dead_letter_queue_configured" {
fedramp_moderate_rev_4 = "true"
ffiec = "true"
hipaa = "true"
+ nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
- nist_800_171_rev_2 = "true"
soc_2 = "true"
})
}
@@ -38,6 +38,7 @@ control "lambda_function_in_vpc" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -60,6 +61,7 @@ control "lambda_function_restrict_public_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/opensearch.sp b/conformance_pack/opensearch.sp
index 69c78be8..0dc4e5bb 100644
--- a/conformance_pack/opensearch.sp
+++ b/conformance_pack/opensearch.sp
@@ -4,26 +4,116 @@ locals {
})
}
+control "opensearch_domain_encryption_at_rest_enabled" {
+ title = "OpenSearch domains should have encryption at rest enabled"
+ description = "This control checks whether Amazon OpenSearch domains have encryption-at-rest configuration enabled. The check fails if encryption at rest is not enabled."
+ query = query.opensearch_domain_encryption_at_rest_enabled
+
+ tags = merge(local.conformance_pack_opensearch_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "opensearch_domain_fine_grained_access_enabled" {
+ title = "OpenSearch domains should have fine-grained access control enabled"
+ description = "This control checks whether OpenSearch domains have fine-grained access control enabled. The control fails if the fine-grained access control is not enabled. Fine-grained access control requires advanced-security-optionsin the OpenSearch parameter update-domain-config to be enabled."
+ query = query.opensearch_domain_fine_grained_access_enabled
+
+ tags = merge(local.conformance_pack_opensearch_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "opensearch_domain_https_required" {
+ title = "OpenSearch domains should use HTTPS"
+ description = "This control checks whether connections to OpenSearch domains are using HTTPS. The rule is non compliant if the Amazon OpenSearch domain 'EnforceHTTPS' is not 'true' or is 'true' and 'TLSSecurityPolicy' is not in 'tlsPolicies'."
+ query = query.opensearch_domain_https_required
+
+ tags = merge(local.conformance_pack_opensearch_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "opensearch_domain_audit_logging_enabled" {
- title = "OpenSearch domain audit logging should be enabled"
- description = "This control checks if OpenSearch Service domains have audit logging enabled. The rule is non compliant if an OpenSearch Service domain does not have audit logging enabled."
+ title = "OpenSearch domains should have audit logging enabled"
+ description = "This control checks whether Amazon OpenSearch Service domains have audit logging enabled. The rule is non compliant if an OpenSearch Service domain does not have audit logging enabled."
query = query.opensearch_domain_audit_logging_enabled
tags = merge(local.conformance_pack_opensearch_common_tags, {
- soc_2 = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
control "opensearch_domain_logs_to_cloudwatch" {
- title = "OpenSearch domain should send logs to CloudWatch"
- description = "This control checks if OpenSearch Service domains are configured to send logs to Amazon CloudWatch Logs. The rule is non compliant if logging is not configured."
+ title = "OpenSearch domains logs to Amazon CloudWatch Logs"
+ description = "This control checks whether Amazon OpenSearch Service domains are configured to send logs to Amazon CloudWatch Logs. The rule is non compliant if logging is not configured."
query = query.opensearch_domain_logs_to_cloudwatch
+
tags = merge(local.conformance_pack_opensearch_common_tags, {
- soc_2 = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
+query "opensearch_domain_encryption_at_rest_enabled" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when encryption_at_rest_options ->> 'Enabled' = 'false' then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when encryption_at_rest_options ->> 'Enabled' = 'false' then title || ' encryption at rest not enabled.'
+ else title || ' encryption at rest enabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_opensearch_domain;
+ EOQ
+}
+
+query "opensearch_domain_fine_grained_access_enabled" {
+ sql = <<-EOQ
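+ -- Fine-grained access control is reported under advanced_security_options; a missing or disabled block raises an alarm.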
+ select
+ arn as resource,
+ case
+ when advanced_security_options is null or not (advanced_security_options -> 'Enabled')::boolean then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when advanced_security_options is null or not (advanced_security_options -> 'Enabled')::boolean then title || ' has fine-grained access control disabled.'
+ else title || ' has fine-grained access control enabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_opensearch_domain;
+ EOQ
+}
+
+query "opensearch_domain_https_required" {
+ sql = <<-EOQ
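+ -- Alarm when EnforceHTTPS is false, or when it is true but the TLS security policy is not an accepted value.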
+ select
+ arn as resource,
+ case
+ when (domain_endpoint_options ->> 'EnforceHTTPS' = 'false') or (domain_endpoint_options ->> 'EnforceHTTPS' = 'true' and domain_endpoint_options ->> 'TLSSecurityPolicy' not in ('tlsPolicies')) then 'alarm'
+ else 'ok'
+ end status,
+ case
+ when (domain_endpoint_options ->> 'EnforceHTTPS' = 'false') or (domain_endpoint_options ->> 'EnforceHTTPS' = 'true' and domain_endpoint_options ->> 'TLSSecurityPolicy' not in ('tlsPolicies')) then title || ' does not use HTTPS.'
+ else title || ' uses HTTPS.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_opensearch_domain;
+ EOQ
+}
+
query "opensearch_domain_audit_logging_enabled" {
sql = <<-EOQ
select
@@ -105,45 +195,6 @@ query "opensearch_domain_logs_to_cloudwatch" {
# Non-Config rule query
-query "opensearch_domain_encryption_at_rest_enabled" {
- sql = <<-EOQ
- select
-
- arn as resource,
- case
- when encryption_at_rest_options ->> 'Enabled' = 'false' then 'alarm'
- else 'ok'
- end status,
- case
- when encryption_at_rest_options ->> 'Enabled' = 'false' then title || ' encryption at rest not enabled.'
- else title || ' encryption at rest enabled.'
- end reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_opensearch_domain;
- EOQ
-}
-
-query "opensearch_domain_fine_grained_access_enabled" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when advanced_security_options is null or not (advanced_security_options -> 'Enabled')::boolean then 'alarm'
- else 'ok'
- end as status,
- case
- when advanced_security_options is null or not (advanced_security_options -> 'Enabled')::boolean then title || ' having fine-grained access control disabled.'
- else title || ' having fine-grained access control enabled.'
- end as reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_opensearch_domain;
- EOQ
-}
-
query "opensearch_domain_in_vpc" {
sql = <<-EOQ
with public_subnets as (
@@ -184,5 +235,3 @@ query "opensearch_domain_in_vpc" {
on d.arn = p.arn;
EOQ
}
-
-
diff --git a/conformance_pack/rds.sp b/conformance_pack/rds.sp
index b73e2efe..86dbd2c2 100644
--- a/conformance_pack/rds.sp
+++ b/conformance_pack/rds.sp
@@ -22,6 +22,7 @@ control "rds_db_instance_backup_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -44,6 +45,7 @@ control "rds_db_instance_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -85,6 +87,7 @@ control "rds_db_instance_prohibit_public_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -106,6 +109,7 @@ control "rds_db_snapshot_encrypted_at_rest" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -129,6 +133,7 @@ control "rds_db_snapshot_prohibit_public_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -150,6 +155,7 @@ control "rds_db_instance_logging_enabled" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -171,6 +177,7 @@ control "rds_db_instance_in_backup_plan" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -228,6 +235,7 @@ control "rds_db_cluster_iam_authentication_enabled" {
tags = merge(local.conformance_pack_rds_common_tags, {
nist_800_171_rev_2 = "true"
+ pci_dss_v321 = "true"
})
}
@@ -244,6 +252,7 @@ control "rds_db_cluster_aurora_protected_by_backup_plan" {
gxp_eu_annex_11 = "true"
hipaa = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -262,6 +271,7 @@ control "rds_db_instance_protected_by_backup_plan" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -274,6 +284,7 @@ control "rds_db_instance_automatic_minor_version_upgrade_enabled" {
tags = merge(local.conformance_pack_rds_common_tags, {
cisa_cyber_essentials = "true"
ffiec = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -309,6 +320,26 @@ control "rds_db_instance_ca_certificate_expires_7_days" {
})
}
+control "rds_db_instance_no_default_admin_name" {
+ title = "RDS database instances should use a custom administrator username"
+ description = "This control checks whether you've changed the administrative username for Amazon Relational Database Service (Amazon RDS) database instances from the default value. The control fails if the administrative username is set to the default value."
+ query = query.rds_db_instance_no_default_admin_name
+
+ tags = merge(local.conformance_pack_rds_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "rds_db_cluster_no_default_admin_name" {
+ title = "RDS database clusters should use a custom administrator username"
+ description = "This control checks whether an Amazon RDS database cluster has changed the admin username from its default value. This rule will fail if the admin username is set to the default value."
+ query = query.rds_db_cluster_no_default_admin_name
+
+ tags = merge(local.conformance_pack_rds_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
query "rds_db_instance_backup_enabled" {
sql = <<-EOQ
select
@@ -617,7 +648,6 @@ query "rds_db_instance_deletion_protection_enabled" {
when deletion_protection then title || ' deletion protection enabled.'
else title || ' deletion protection not enabled.'
end reason
-
${local.tag_dimensions_sql}
${local.common_dimensions_sql}
from
@@ -676,7 +706,6 @@ query "rds_db_cluster_aurora_protected_by_backup_plan" {
resource_type = 'Aurora'
)
select
-
c.arn as resource,
case
when c.engine not like '%aurora%' then 'skip'
@@ -768,7 +797,6 @@ query "rds_db_cluster_deletion_protection_enabled" {
query "rds_db_instance_cloudwatch_logs_enabled" {
sql = <<-EOQ
select
-
arn as resource,
case
when enabled_cloudwatch_logs_exports is not null then 'ok'
@@ -789,7 +817,6 @@ query "rds_db_instance_cloudwatch_logs_enabled" {
query "rds_db_instance_ca_certificate_expires_7_days" {
sql = <<-EOQ
select
-
arn as resource,
case
when extract(day from (to_timestamp(certificate ->> 'ValidTill','YYYY-MM-DDTHH:MI:SS')) - current_timestamp) <= '7' then 'alarm'
@@ -798,7 +825,25 @@ query "rds_db_instance_ca_certificate_expires_7_days" {
title || ' expires ' || to_char(to_timestamp(certificate ->> 'ValidTill','YYYY-MM-DDTHH:MI:SS'), 'DD-Mon-YYYY') ||
' (' || extract(day from (to_timestamp(certificate ->> 'ValidTill','YYYY-MM-DDTHH:MI:SS')) - current_timestamp) || ' days).'
as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_rds_db_instance;
+ EOQ
+}
+query "rds_db_instance_no_default_admin_name" {
+ sql = <<-EOQ
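+ -- Alarm when the master username is left at an engine default ('admin' or 'postgres').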
+ select
+ arn as resource,
+ case
+ when master_user_name in ('admin', 'postgres') then 'alarm'
+ else 'ok'
+ end status,
+ case
+ when master_user_name in ('admin', 'postgres') then title || ' using default master user name.'
+ else title || ' not using default master user name.'
+ end reason
${local.tag_dimensions_sql}
${local.common_dimensions_sql}
from
@@ -806,6 +851,25 @@ query "rds_db_instance_ca_certificate_expires_7_days" {
EOQ
}
+query "rds_db_cluster_no_default_admin_name" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when master_user_name in ('admin', 'postgres') then 'alarm'
+ else 'ok'
+ end status,
+ case
+ when master_user_name in ('admin', 'postgres') then title || ' using default master user name.'
+ else title || ' not using default master user name.'
+ end reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_rds_db_cluster;
+ EOQ
+}
+
# Non-Config rule query
query "rds_db_cluster_aurora_backtracking_enabled" {
@@ -887,26 +951,6 @@ query "rds_db_cluster_multiple_az_enabled" {
EOQ
}
-query "rds_db_cluster_no_default_admin_name" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when master_user_name in ('admin','postgres') then 'alarm'
- else 'ok'
- end status,
- case
- when master_user_name in ('admin','postgres') then title || ' using default master user name.'
- else title || ' not using default master user name.'
- end reason
-
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_rds_db_cluster;
- EOQ
-}
-
query "rds_db_instance_and_cluster_no_default_port" {
sql = <<-EOQ
(
@@ -1015,25 +1059,6 @@ query "rds_db_instance_in_vpc" {
EOQ
}
-query "rds_db_instance_no_default_admin_name" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when master_user_name in ('admin','postgres') then 'alarm'
- else 'ok'
- end status,
- case
- when master_user_name in ('admin','postgres') then title || ' using default master user name.'
- else title || ' not using default master user name.'
- end reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_rds_db_instance;
- EOQ
-}
-
query "rds_db_parameter_group_events_subscription" {
sql = <<-EOQ
select
diff --git a/conformance_pack/redshift.sp b/conformance_pack/redshift.sp
index 5475b59d..c2d7bf17 100644
--- a/conformance_pack/redshift.sp
+++ b/conformance_pack/redshift.sp
@@ -21,6 +21,7 @@ control "redshift_cluster_encryption_in_transit_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -44,6 +45,7 @@ control "redshift_cluster_encryption_logging_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -66,6 +68,7 @@ control "redshift_cluster_prohibit_public_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -88,6 +91,7 @@ control "redshift_cluster_automatic_snapshots_min_7_days" {
hipaa = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -137,16 +141,104 @@ control "redshift_cluster_enhanced_vpc_routing_enabled" {
})
}
+control "redshift_cluster_no_default_admin_name" {
+ title = "Amazon Redshift clusters should not use the default Admin username"
+ description = "This control checks whether a Amazon Redshift cluster has changed the admin username from its default value. This control will fail if the admin username for a Redshift cluster is set to awsuser."
+ query = query.redshift_cluster_no_default_admin_name
+
+ tags = merge(local.conformance_pack_redshift_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "redshift_cluster_audit_logging_enabled" {
title = "Amazon Redshift audit logging should be enabled"
description = "This control ensures if redshift clusters are logging audits to a specific bucket. The rule is no compliant if audit logging is not enabled for a redshift cluster or if the 'bucketNames' parameter is provided but the audit logging destination does not match."
query = query.redshift_cluster_audit_logging_enabled
tags = merge(local.conformance_pack_redshift_common_tags, {
- soc_2 = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
+query "redshift_cluster_no_default_admin_name" {
+ sql = <<-EOQ
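+ -- 'awsuser' is the default admin username for Redshift clusters.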
+ select
+ arn as resource,
+ case
+ when master_username = 'awsuser' then 'alarm'
+ else 'ok'
+ end status,
+ case
+ when master_username = 'awsuser' then title || ' using default master user name.'
+ else title || ' not using default master user name.'
+ end reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_redshift_cluster;
+ EOQ
+}
+
+query "redshift_cluster_audit_logging_enabled" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when logging_status ->> 'LoggingEnabled' = 'true' then 'ok'
+ else 'alarm'
+ end as status,
+ case
+ when logging_status ->> 'LoggingEnabled' = 'true' then title || ' logging enabled.'
+ else title || ' logging disabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_redshift_cluster;
+ EOQ
+}
+
+query "redshift_cluster_automatic_upgrade_major_versions_enabled" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when allow_version_upgrade then 'ok'
+ else 'alarm'
+ end as status,
+ case
+ when allow_version_upgrade then title || ' automatic upgrades to major versions enabled.'
+ else title || ' automatic upgrades to major versions disabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_redshift_cluster;
+ EOQ
+}
+
+query "redshift_cluster_no_default_database_name" {
+ sql = <<-EOQ
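+ -- 'dev' is the default database name for new Redshift clusters.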
+ select
+ arn as resource,
+ case
+ when db_name = 'dev' then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when db_name = 'dev' then title || ' using default database name.'
+ else title || ' not using default database name.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_redshift_cluster;
+ EOQ
+}
+
query "redshift_cluster_encryption_in_transit_enabled" {
sql = <<-EOQ
with pg_with_ssl as (
@@ -162,8 +254,7 @@ query "redshift_cluster_encryption_in_transit_enabled" {
and p ->> 'ParameterValue' = 'true'
)
select
-
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
+ arn as resource,
case
when cpg ->> 'ParameterGroupName' in (select pg_name from pg_with_ssl ) then 'ok'
else 'alarm'
@@ -183,7 +274,6 @@ query "redshift_cluster_encryption_in_transit_enabled" {
query "redshift_cluster_encryption_logging_enabled" {
sql = <<-EOQ
select
-
arn as resource,
case
when not encrypted then 'alarm'
@@ -192,7 +282,7 @@ query "redshift_cluster_encryption_logging_enabled" {
end as status,
case
when not encrypted then title || ' not encrypted.'
- when not (logging_status ->> 'LoggingEnabled') :: boolean then title || ' audit logging not enabled.'
+ when not (logging_status ->> 'LoggingEnabled')::boolean then title || ' audit logging not enabled.'
else title || ' audit logging and encryption enabled.'
end as reason
${local.tag_dimensions_sql}
@@ -205,7 +295,6 @@ query "redshift_cluster_encryption_logging_enabled" {
query "redshift_cluster_prohibit_public_access" {
sql = <<-EOQ
select
-
cluster_namespace_arn as resource,
case
when publicly_accessible then 'alarm'
@@ -226,14 +315,14 @@ query "redshift_cluster_prohibit_public_access" {
query "redshift_cluster_automatic_snapshots_min_7_days" {
sql = <<-EOQ
select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
+ arn as resource,
case
when automated_snapshot_retention_period >= 7 then 'ok'
else 'alarm'
end as status,
case
when automated_snapshot_retention_period >= 7 then title || ' automatic snapshots enabled with retention period greater than equals 7 days.'
- else title || ' automatic snapshots not enabled with retention period greater than equals 7 days.'
+ else title || ' automatic snapshots enabled with retention period less than 7 days.'
end as reason
${local.tag_dimensions_sql}
${local.common_dimensions_sql}
@@ -245,7 +334,7 @@ query "redshift_cluster_automatic_snapshots_min_7_days" {
query "redshift_cluster_kms_enabled" {
sql = <<-EOQ
select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
+ arn as resource,
case
when encrypted and kms_key_id is not null then 'ok'
else 'alarm'
@@ -264,7 +353,7 @@ query "redshift_cluster_kms_enabled" {
query "redshift_cluster_maintenance_settings_check" {
sql = <<-EOQ
select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
+ arn as resource,
case
when allow_version_upgrade and automated_snapshot_retention_period >= 7 then 'ok'
else 'alarm'
@@ -283,7 +372,7 @@ query "redshift_cluster_maintenance_settings_check" {
query "redshift_cluster_enhanced_vpc_routing_enabled" {
sql = <<-EOQ
select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
+ arn as resource,
case
when enhanced_vpc_routing then 'ok'
else 'alarm'
@@ -298,82 +387,3 @@ query "redshift_cluster_enhanced_vpc_routing_enabled" {
aws_redshift_cluster;
EOQ
}
-
-# Non-Config rule query
-
-query "redshift_cluster_automatic_upgrade_major_versions_enabled" {
- sql = <<-EOQ
- select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
- case
- when allow_version_upgrade then 'ok'
- else 'alarm'
- end as status,
- case
- when allow_version_upgrade then title || ' automatic upgrades to major versions enabled.'
- else title || ' automatic upgrades to major versions disabled.'
- end as reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_redshift_cluster;
- EOQ
-}
-
-query "redshift_cluster_audit_logging_enabled" {
- sql = <<-EOQ
- select
- 'arn:aws:redshift:' || region || ':' || account_id || ':' || 'cluster' || ':' || cluster_identifier as resource,
- case
- when logging_status ->> 'LoggingEnabled' = 'true' then 'ok'
- else 'alarm'
- end as status,
- case
- when logging_status ->> 'LoggingEnabled' = 'true' then title || ' logging enabled.'
- else title || ' logging disabled.'
- end as reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_redshift_cluster;
- EOQ
-}
-
-query "redshift_cluster_no_default_admin_name" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when master_username = 'awsuser' then 'alarm'
- else 'ok'
- end status,
- case
- when master_username = 'awsuser' then title || ' using default master user name.'
- else title || ' not using default master user name.'
- end reason
-
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_redshift_cluster;
- EOQ
-}
-
-query "redshift_cluster_no_default_database_name" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when db_name = 'dev' then 'alarm'
- else 'ok'
- end as status,
- case
- when db_name = 'dev' then title || ' using default database name.'
- else title || ' not using default database name.'
- end as reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_redshift_cluster;
- EOQ
-}
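Reviewer note: the consolidated Redshift queries above read the `LoggingEnabled` flag in two equivalent ways, a text comparison (`logging_status ->> 'LoggingEnabled' = 'true'`) and a boolean cast (`(logging_status ->> 'LoggingEnabled')::boolean`). A minimal sketch that can be run ad hoc with `steampipe query` to confirm the two forms agree (assuming the AWS plugin's `aws_redshift_cluster` table is available):

```sql
-- Ad hoc sketch, not part of the mod: compare the two ways the queries above
-- read the LoggingEnabled flag from the logging_status jsonb column.
select
  title,
  logging_status ->> 'LoggingEnabled' = 'true' as logging_enabled_text_compare,
  (logging_status ->> 'LoggingEnabled')::boolean as logging_enabled_boolean_cast
from
  aws_redshift_cluster;
```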
diff --git a/conformance_pack/s3.sp b/conformance_pack/s3.sp
index 00f1c21a..e08e4510 100644
--- a/conformance_pack/s3.sp
+++ b/conformance_pack/s3.sp
@@ -21,6 +21,7 @@ control "s3_bucket_cross_region_replication_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -44,6 +45,7 @@ control "s3_bucket_default_encryption_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -67,6 +69,7 @@ control "s3_bucket_enforces_ssl" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -89,6 +92,7 @@ control "s3_bucket_logging_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -128,6 +132,7 @@ control "s3_bucket_restrict_public_read_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -151,6 +156,7 @@ control "s3_bucket_restrict_public_write_access" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -175,6 +181,7 @@ control "s3_bucket_versioning_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -224,6 +231,7 @@ control "s3_public_access_block_bucket_account" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -244,6 +252,7 @@ control "s3_bucket_default_encryption_enabled_kms" {
hipaa = "true"
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -266,7 +275,6 @@ control "s3_public_access_block_bucket" {
control "s3_bucket_policy_restricts_cross_account_permission_changes" {
title = "Amazon S3 permissions granted to other AWS accounts in bucket policies should be restricted"
description = "This control checks whether the S3 bucket policy prevents principals from other AWS accounts from performing denied actions on resources in the S3 bucket."
- severity = "high"
query = query.s3_bucket_policy_restricts_cross_account_permission_changes
tags = merge(local.conformance_pack_s3_common_tags, {
@@ -292,7 +300,8 @@ control "s3_bucket_lifecycle_policy_enabled" {
query = query.s3_bucket_lifecycle_policy_enabled
tags = merge(local.conformance_pack_s3_common_tags, {
- soc_2 = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
@@ -302,7 +311,8 @@ control "s3_bucket_versioning_and_lifecycle_policy_enabled" {
query = query.s3_bucket_versioning_and_lifecycle_policy_enabled
tags = merge(local.conformance_pack_s3_common_tags, {
- soc_2 = "true"
+ pci_dss_v321 = "true"
+ soc_2 = "true"
})
}
@@ -845,6 +855,93 @@ query "s3_bucket_object_logging_enabled" {
EOQ
}
+query "s3_bucket_lifecycle_policy_enabled" {
+ sql = <<-EOQ
+ with lifecycle_rules_enabled as (
+ select
+ arn
+ from
+ aws_s3_bucket,
+ jsonb_array_elements(lifecycle_rules) as r
+ where
+ r ->> 'Status' = 'Enabled'
+ )
+ select
+ b.arn as resource,
+ case
+ when r.arn is not null then 'ok'
+ else 'alarm'
+ end status,
+ case
+ when r.arn is not null then name || ' lifecycle policy or rules configured.'
+ else name || ' lifecycle policy or rules not configured.'
+ end reason
+ ${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
+ ${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
+ from
+ aws_s3_bucket as b
+ left join lifecycle_rules_enabled as r on r.arn = b.arn;
+ EOQ
+}
+
+query "s3_bucket_versioning_and_lifecycle_policy_enabled" {
+ sql = <<-EOQ
+ with lifecycle_rules_enabled as (
+ select
+ arn
+ from
+ aws_s3_bucket,
+ jsonb_array_elements(lifecycle_rules) as r
+ where
+ r ->> 'Status' = 'Enabled'
+ )
+ select
+ b.arn as resource,
+ case
+ when not versioning_enabled then 'alarm'
+ when versioning_enabled and r.arn is not null then 'ok'
+ else 'alarm'
+ end status,
+ case
+ when not versioning_enabled then name || ' versioning disabled.'
+ when versioning_enabled and r.arn is not null then name || ' lifecycle policy configured.'
+ else name || ' lifecycle policy not configured.'
+ end reason
+ ${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
+ ${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
+ from
+ aws_s3_bucket as b
+ left join lifecycle_rules_enabled as r on r.arn = b.arn;
+ EOQ
+}
+
+query "s3_bucket_event_notifications_enabled" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when
+ event_notification_configuration ->> 'EventBridgeConfiguration' is null
+ and event_notification_configuration ->> 'LambdaFunctionConfigurations' is null
+ and event_notification_configuration ->> 'QueueConfigurations' is null
+ and event_notification_configuration ->> 'TopicConfigurations' is null then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when
+ event_notification_configuration ->> 'EventBridgeConfiguration' is null
+ and event_notification_configuration ->> 'LambdaFunctionConfigurations' is null
+ and event_notification_configuration ->> 'QueueConfigurations' is null
+ and event_notification_configuration ->> 'TopicConfigurations' is null then title || ' event notifications disabled.'
+ else title || ' event notifications enabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_s3_bucket;
+ EOQ
+}
+
# Non-Config rule query
query "s3_bucket_acls_should_prohibit_user_access" {
@@ -905,33 +1002,6 @@ query "s3_bucket_acls_should_prohibit_user_access" {
EOQ
}
-query "s3_bucket_event_notifications_enabled" {
- sql = <<-EOQ
- select
- arn as resource,
- case
- when
- event_notification_configuration ->> 'EventBridgeConfiguration' is null
- and event_notification_configuration ->> 'LambdaFunctionConfigurations' is null
- and event_notification_configuration ->> 'QueueConfigurations' is null
- and event_notification_configuration ->> 'TopicConfigurations' is null then 'alarm'
- else 'ok'
- end as status,
- case
- when
- event_notification_configuration ->> 'EventBridgeConfiguration' is null
- and event_notification_configuration ->> 'LambdaFunctionConfigurations' is null
- and event_notification_configuration ->> 'QueueConfigurations' is null
- and event_notification_configuration ->> 'TopicConfigurations' is null then title || ' event notifications disabled.'
- else title || ' event notifications enabled.'
- end as reason
- ${local.tag_dimensions_sql}
- ${local.common_dimensions_sql}
- from
- aws_s3_bucket;
- EOQ
-}
-
query "s3_bucket_mfa_delete_enabled" {
sql = <<-EOQ
select
@@ -944,7 +1014,6 @@ query "s3_bucket_mfa_delete_enabled" {
when versioning_mfa_delete then name || ' MFA delete enabled.'
else name || ' MFA delete disabled.'
end reason
-
${local.tag_dimensions_sql}
${local.common_dimensions_sql}
from
@@ -1012,63 +1081,3 @@ query "s3_bucket_public_access_blocked" {
aws_s3_bucket;
EOQ
}
-
-query "s3_bucket_versioning_and_lifecycle_policy_enabled" {
- sql = <<-EOQ
- with lifecycle_rules_enabled as (
- select
- arn
- from
- aws_s3_bucket,
- jsonb_array_elements(lifecycle_rules) as r
- where
- r ->> 'Status' = 'Enabled'
- )
- select
- b.arn as resource,
- case
- when not versioning_enabled then 'alarm'
- when versioning_enabled and r.arn is not null then 'ok'
- else 'alarm'
- end status,
- case
- when not versioning_enabled then name || ' versioning diabled.'
- when versioning_enabled and r.arn is not null then ' lifecycle policy configured.'
- else name || ' lifecycle policy not configured.'
- end reason
- ${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
- ${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
- from
- aws_s3_bucket as b
- left join lifecycle_rules_enabled as r on r.arn = b.arn;
- EOQ
-}
-
-query "s3_bucket_lifecycle_policy_enabled" {
- sql = <<-EOQ
- with lifecycle_rules_enabled as (
- select
- arn
- from
- aws_s3_bucket,
- jsonb_array_elements(lifecycle_rules) as r
- where
- r ->> 'Status' = 'Enabled'
- )
- select
- b.arn as resource,
- case
- when r.arn is not null then 'ok'
- else 'alarm'
- end status,
- case
- when r.arn is not null then ' lifecycle policy or rules configured.'
- else name || ' lifecycle policy or rules not configured.'
- end reason
- ${replace(local.tag_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
- ${replace(local.common_dimensions_qualifier_sql, "__QUALIFIER__", "b.")}
- from
- aws_s3_bucket as b
- left join lifecycle_rules_enabled as r on r.arn = b.arn;
- EOQ
-}
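Reviewer note: both new lifecycle queries above use the same pattern, expanding the `lifecycle_rules` jsonb array in a CTE and left joining back to `aws_s3_bucket` so that buckets with no enabled rule still surface and can be reported as `alarm`. A minimal sketch of that pattern (the `distinct` is added here only so a bucket with several enabled rules is counted once):

```sql
-- Minimal sketch of the lifecycle-rule pattern used above: expand the
-- lifecycle_rules jsonb array, keep only Enabled rules, then left join back so
-- every bucket appears with a has_enabled_lifecycle_rule flag.
with lifecycle_rules_enabled as (
  select distinct
    arn
  from
    aws_s3_bucket,
    jsonb_array_elements(lifecycle_rules) as r
  where
    r ->> 'Status' = 'Enabled'
)
select
  b.name,
  r.arn is not null as has_enabled_lifecycle_rule
from
  aws_s3_bucket as b
  left join lifecycle_rules_enabled as r on r.arn = b.arn;
```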
diff --git a/conformance_pack/sagemaker.sp b/conformance_pack/sagemaker.sp
index 871c3d18..51736b06 100644
--- a/conformance_pack/sagemaker.sp
+++ b/conformance_pack/sagemaker.sp
@@ -32,6 +32,7 @@ control "sagemaker_notebook_instance_direct_internet_access_disabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -53,6 +54,7 @@ control "sagemaker_notebook_instance_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -74,6 +76,7 @@ control "sagemaker_endpoint_configuration_encryption_at_rest_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/secretsmanager.sp b/conformance_pack/secretsmanager.sp
index 2a0136c2..def995bc 100644
--- a/conformance_pack/secretsmanager.sp
+++ b/conformance_pack/secretsmanager.sp
@@ -52,6 +52,7 @@ control "secretsmanager_secret_encrypted_with_kms_cmk" {
tags = merge(local.conformance_pack_secretsmanager_common_tags, {
gxp_21_cfr_part_11 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
})
}
diff --git a/conformance_pack/sns.sp b/conformance_pack/sns.sp
index ee39d447..d7c327e3 100644
--- a/conformance_pack/sns.sp
+++ b/conformance_pack/sns.sp
@@ -21,6 +21,7 @@ control "sns_topic_encrypted_at_rest" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -36,6 +37,16 @@ control "sns_topic_policy_prohibit_public_access" {
})
}
+control "sns_topic_notification_delivery_status_enabled" {
+ title = "Logging of delivery status should be enabled for notification messages sent to a topic"
+ description = "This control checks whether logging is enabled for the delivery status of notification messages sent to an Amazon SNS topic for the endpoints. This control fails if the delivery status notification for messages is not enabled."
+ query = query.sns_topic_notification_delivery_status_enabled
+
+ tags = merge(local.conformance_pack_sns_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
query "sns_topic_encrypted_at_rest" {
sql = <<-EOQ
select
@@ -92,8 +103,6 @@ query "sns_topic_policy_prohibit_public_access" {
EOQ
}
-# Non-Config rule query
-
query "sns_topic_notification_delivery_status_enabled" {
sql = <<-EOQ
select
diff --git a/conformance_pack/sqs.sp b/conformance_pack/sqs.sp
index 993455f0..fd4d877d 100644
--- a/conformance_pack/sqs.sp
+++ b/conformance_pack/sqs.sp
@@ -9,7 +9,7 @@ control "sqs_queue_policy_prohibit_public_access" {
description = "Manage access to resources in the AWS Cloud by ensuring AWS SQS queues cannot be publicly accessed."
query = query.sqs_queue_policy_prohibit_public_access
- tags = merge(local.conformance_pack_sns_common_tags, {
+ tags = merge(local.conformance_pack_sqs_common_tags, {
other_checks = "true"
})
}
@@ -19,7 +19,7 @@ control "sqs_queue_dead_letter_queue_configured" {
description = "Ensure SQS queue is configured with a dead-letter queue. Dead-letter queues are useful for debugging your application or messaging system because they let you isolate problematic messages to determine why their processing didn't succeed."
query = query.sqs_queue_dead_letter_queue_configured
- tags = merge(local.conformance_pack_sns_common_tags, {
+ tags = merge(local.conformance_pack_sqs_common_tags, {
other_checks = "true"
})
}
diff --git a/conformance_pack/ssm.sp b/conformance_pack/ssm.sp
index b42804f6..d1af45a6 100644
--- a/conformance_pack/ssm.sp
+++ b/conformance_pack/ssm.sp
@@ -21,6 +21,7 @@ control "ec2_instance_ssm_managed" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -43,6 +44,7 @@ control "ssm_managed_instance_compliance_association_compliant" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -64,6 +66,7 @@ control "ssm_managed_instance_compliance_patch_compliant" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/conformance_pack/vpc.sp b/conformance_pack/vpc.sp
index 0e20e782..047b82a0 100644
--- a/conformance_pack/vpc.sp
+++ b/conformance_pack/vpc.sp
@@ -22,6 +22,7 @@ control "vpc_flow_logs_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -38,10 +39,21 @@ control "vpc_igw_attached_to_authorized_vpc" {
nist_800_171_rev_2 = "true"
nist_800_53_rev_4 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
+control "vpc_network_acl_remote_administration" {
+ title = "Network ACLs should not allow ingress from 0.0.0.0/0 to port 22 or port 3389"
+ description = "This control checks if default ports for SSH/RDP ingress traffic for network access control lists (NACLs) is unrestricted. The rule fails if a NACL inbound entry allows a source CIDR block of '0.0.0.0/0' or '::/0' for ports 22 or 3389."
+ query = query.vpc_network_acl_remote_administration
+
+ tags = merge(local.conformance_pack_vpc_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
control "vpc_security_group_restrict_ingress_tcp_udp_all" {
title = "VPC security groups should restrict ingress TCP and UDP access from 0.0.0.0/0"
description = "Manage access to resources in the AWS Cloud by ensuring common ports are restricted on Amazon Elastic Compute Cloud (Amazon EC2) Security Groups."
@@ -58,6 +70,7 @@ control "vpc_security_group_restrict_ingress_tcp_udp_all" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -103,6 +116,7 @@ control "vpc_security_group_restrict_ingress_ssh_all" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -124,6 +138,7 @@ control "vpc_default_security_group_restricts_all_traffic" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
@@ -156,6 +171,7 @@ control "vpc_eip_associated" {
ffiec = "true"
nist_800_171_rev_2 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
soc_2 = "true"
})
}
@@ -206,6 +222,7 @@ control "vpc_route_table_restrict_public_access_to_igw" {
gxp_21_cfr_part_11 = "true"
nist_800_171_rev_2 = "true"
nist_800_53_rev_5 = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
})
}
@@ -267,6 +284,7 @@ control "vpc_endpoint_service_acceptance_required_enabled" {
tags = merge(local.conformance_pack_vpc_common_tags, {
other_checks = "true"
+ pci_dss_v321 = "true"
})
}
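Reviewer note: the query behind the new `vpc_network_acl_remote_administration` control is defined outside this hunk, so it is not visible here. Purely as an illustration of the data the control description refers to, the sketch below lists ingress NACL entries open to the world; it assumes the `aws_vpc_network_acl` table exposes an `entries` jsonb column mirroring the AWS `NetworkAclEntry` shape (`Egress`, `RuleAction`, `CidrBlock`, `Ipv6CidrBlock`, `PortRange`), which should be checked against the actual query, and it does not apply the port 22/3389 filter the control adds.

```sql
-- Hedged sketch only: ingress (non-egress) NACL entries that allow traffic
-- from 0.0.0.0/0 or ::/0; the real control further restricts to ports 22/3389.
select
  network_acl_id,
  e ->> 'RuleNumber' as rule_number,
  e -> 'PortRange' as port_range,
  coalesce(e ->> 'CidrBlock', e ->> 'Ipv6CidrBlock') as source_cidr
from
  aws_vpc_network_acl,
  jsonb_array_elements(entries) as e
where
  not (e ->> 'Egress')::boolean
  and e ->> 'RuleAction' = 'allow'
  and (e ->> 'CidrBlock' = '0.0.0.0/0' or e ->> 'Ipv6CidrBlock' = '::/0');
```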
diff --git a/conformance_pack/waf.sp b/conformance_pack/waf.sp
index 1e0c613d..30d3f509 100644
--- a/conformance_pack/waf.sp
+++ b/conformance_pack/waf.sp
@@ -1,9 +1,73 @@
-# Non-Config rule query
+locals {
+ conformance_pack_waf_common_tags = merge(local.aws_compliance_common_tags, {
+ service = "AWS/WAF"
+ })
+}
+
+control "waf_rule_condition_attached" {
+ title = "WAF global rule should have at least one condition"
+ description = "This control checks whether an AWS WAF global rule contains any conditions. The control fails if no conditions are present within a rule."
+ query = query.waf_rule_condition_attached
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "waf_rule_group_rule_attached" {
+ title = "WAF global rule group should have at least one rule"
+ description = "This control checks whether an AWS WAF global rule group has at least one rule. The control fails if no rules are present within a rule group."
+ query = query.waf_rule_group_rule_attached
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "waf_web_acl_rule_attached" {
+ title = "WAF global web ACL should have at least one rule or rule group"
+ description = "This control checks whether an AWS WAF global web ACL contains at least one WAF rule or WAF rule group. The control fails if a web ACL does not contain any WAF rules or rule groups."
+ query = query.waf_web_acl_rule_attached
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "waf_web_acl_logging_enabled" {
+ title = "WAF web ACL logging should be enabled"
+ description = "To help with logging and monitoring within your environment, enable AWS WAF logging on regional and global web ACLs. This rule is non compliant for a global web ACL, if it does not have logging enabled."
+ query = query.waf_web_acl_logging_enabled
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "waf_regional_rule_condition_attached" {
+ title = "WAF regional rule should have at least one condition"
+ description = "This control checks whether an AWS WAF regional rule contains any conditions. The control fails if no conditions are present within a rule."
+ query = query.waf_regional_rule_condition_attached
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
+
+control "waf_web_acl_resource_associated" {
+ title = "WAF web ACL should be associated with an Application Load Balancer, API Gateway stage, or CloudFront distributions"
+ description = "This control checks if the web ACL is associated with an Application Load Balancer, API Gateway stage, or CloudFront distributions."
+ query = query.waf_web_acl_resource_associated
+
+ tags = merge(local.conformance_pack_waf_common_tags, {
+ pci_dss_v321 = "true"
+ })
+}
query "waf_rule_condition_attached" {
sql = <<-EOQ
select
- akas as resource,
+ rule_id as resource,
case
when predicates is null or jsonb_array_length(predicates) = 0 then 'alarm'
else 'ok'
@@ -56,3 +120,60 @@ query "waf_web_acl_rule_attached" {
aws_waf_web_acl;
EOQ
}
+
+query "waf_web_acl_logging_enabled" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when logging_configuration is null then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when logging_configuration is null then title || ' logging disabled.'
+ else title || ' logging enabled.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_waf_web_acl;
+ EOQ
+}
+
+query "waf_regional_rule_condition_attached" {
+ sql = <<-EOQ
+ select
+ akas as resource,
+ case
+ when predicates is null or jsonb_array_length(predicates) = 0 then 'alarm'
+ else 'ok'
+ end as status,
+ case
+ when predicates is null or jsonb_array_length(predicates) = 0 then title || ' has no attached conditions.'
+ else title || ' has attached conditions.'
+ end as reason
+ ${local.common_dimensions_sql}
+ from
+ aws_wafregional_rule;
+ EOQ
+}
+
+query "waf_web_acl_resource_associated" {
+ sql = <<-EOQ
+ select
+ arn as resource,
+ case
+ when jsonb_array_length(resources) > 0 then 'ok'
+ else 'alarm'
+ end as status,
+ case
+ when jsonb_array_length(resources) > 0 then title || ' associated with AWS resource(s).'
+ else title || ' not associated with any AWS resource.'
+ end as reason
+ ${local.tag_dimensions_sql}
+ ${local.common_dimensions_sql}
+ from
+ aws_wafregional_web_acl;
+ EOQ
+}
+
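Reviewer note: a quick way to spot-check the new `waf_web_acl_resource_associated` logic outside the benchmark is to list regional web ACLs with no associated resources directly; this uses the same `aws_wafregional_web_acl` table and `resources` column as the query above, with an extra `is null` guard added only for the sketch.

```sql
-- Ad hoc sketch: regional web ACLs the new control would report as 'alarm'.
select
  title,
  arn,
  coalesce(jsonb_array_length(resources), 0) as associated_resource_count
from
  aws_wafregional_web_acl
where
  resources is null
  or jsonb_array_length(resources) = 0;
```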
diff --git a/conformance_pack/wafv2.sp b/conformance_pack/wafv2.sp
index 2dbd5f7e..a1bf2564 100644
--- a/conformance_pack/wafv2.sp
+++ b/conformance_pack/wafv2.sp
@@ -22,6 +22,7 @@ control "wafv2_web_acl_logging_enabled" {
nist_800_53_rev_4 = "true"
nist_800_53_rev_5 = "true"
nist_csf = "true"
+ pci_dss_v321 = "true"
rbi_cyber_security = "true"
soc_2 = "true"
})
diff --git a/foundational_security/ssm.sp b/foundational_security/ssm.sp
index 02a29f08..15d102a7 100644
--- a/foundational_security/ssm.sp
+++ b/foundational_security/ssm.sp
@@ -33,7 +33,7 @@ control "foundational_security_ssm_1" {
control "foundational_security_ssm_2" {
title = "2 All EC2 instances managed by Systems Manager should be compliant with patching requirements"
- description = "This control checks whether the compliance status of the Amazon EC2 Systems Manager patch compliance is COMPLIANT or NON_COMPLIANT after the patch installation on the instance. It only checks instances that are managed by Systems Manager Patch Manager."
+ description = "This control checks whether the compliance status of the Amazon EC2 Systems Manager patch compliance is COMPLIANT or non compliant after the patch installation on the instance. It only checks instances that are managed by Systems Manager Patch Manager."
severity = "high"
query = query.ssm_managed_instance_compliance_patch_compliant
documentation = file("./foundational_security/docs/foundational_security_ssm_2.md")
@@ -46,7 +46,7 @@ control "foundational_security_ssm_2" {
control "foundational_security_ssm_3" {
title = "3 Instances managed by Systems Manager should have an association compliance status of COMPLIANT"
- description = "This control checks whether the status of the AWS Systems Manager association compliance is COMPLIANT or NON_COMPLIANT after the association is run on an instance. The control passes if the association compliance status is COMPLIANT."
+ description = "This control checks whether the status of the AWS Systems Manager association compliance is COMPLIANT or non compliant after the association is run on an instance. The control passes if the association compliance status is COMPLIANT."
severity = "low"
query = query.ssm_managed_instance_compliance_association_compliant
documentation = file("./foundational_security/docs/foundational_security_ssm_3.md")
diff --git a/mod.sp b/mod.sp
index 07864a9b..60fc6285 100644
--- a/mod.sp
+++ b/mod.sp
@@ -77,7 +77,7 @@ mod "aws_compliance" {
require {
plugin "aws" {
- version = "0.95.0"
+ version = "0.97.0"
}
}
}
diff --git a/pci_dss_v321/docs/pci_dss_v321_overview.md b/pci_dss_v321/docs/pci_dss_v321_overview.md
new file mode 100644
index 00000000..22e223f2
--- /dev/null
+++ b/pci_dss_v321/docs/pci_dss_v321_overview.md
@@ -0,0 +1,24 @@
+To obtain the latest version of the official guide, please visit https://www.pcisecuritystandards.org/document_library?category=pcidss&document=pci_dss.
+
+## Overview
+
+The Payment Card Industry Data Security Standard (PCI DSS) was developed to encourage and enhance cardholder data security and facilitate the broad adoption of consistent data security measures globally. PCI DSS provides a baseline of technical and operational requirements designed to protect account data. PCI DSS applies to all entities involved in payment card processing—including merchants, processors, acquirers, issuers, and service providers. PCI DSS also applies to all other entities that store, process or transmit cardholder data (CHD) and/or sensitive authentication data (SAD).
+
+PCI Data Security Standard Requirements and Security Assessment Procedures combines the 12 PCI DSS requirements and corresponding testing procedures into a security assessment tool. It is designed for use during PCI DSS compliance assessments as part of an entity’s validation process. The following sections provide detailed guidelines and best practices to assist entities prepare for, conduct, and report the results of a PCI DSS assessment.
+
+
+PCI DSS comprises a minimum set of requirements for protecting account data, and may be enhanced by additional controls and practices to further mitigate risks, as well as local, regional and sector laws and regulations. Additionally, legislation or regulatory requirements may require specific protection of personal information or other data elements (for example, cardholder name). PCI DSS does not supersede local or regional laws, government regulations, or other legal requirements.
+
+The PCI DSS security requirements apply to all system components included in or connected to the cardholder data environment. The cardholder data environment (CDE) is comprised of people, processes and technologies that store, process, or transmit cardholder data or sensitive authentication data. “System components” include network devices, servers, computing devices, and applications. Examples of system components include but are not limited to the following:
+
+- Systems that provide security services (for example, authentication servers), facilitate segmentation (for example, internal firewalls), or may impact the security of (for example, name resolution or web redirection servers) the CDE.
+
+- Virtualization components such as virtual machines, virtual switches/routers, virtual appliances, virtual applications/desktops, and hypervisors.
+
+- Network components including but not limited to firewalls, switches, routers, wireless access points, network appliances, and other security appliances.
+
+- Server types including but not limited to web, application, database, authentication, mail, proxy, Network Time Protocol (NTP), and Domain Name System (DNS).
+
+- Applications including all purchased and custom applications, including internal and external (for example, Internet) applications.
+
+- Any other component or device located within or connected to the CDE.
\ No newline at end of file
diff --git a/pci_dss_v321/pci_dss_v321.sp b/pci_dss_v321/pci_dss_v321.sp
new file mode 100644
index 00000000..1af21152
--- /dev/null
+++ b/pci_dss_v321/pci_dss_v321.sp
@@ -0,0 +1,25 @@
+locals {
+ pci_dss_v321_common_tags = merge(local.aws_compliance_common_tags, {
+ pci_dss_v321 = "true"
+ type = "Benchmark"
+ })
+}
+
+benchmark "pci_dss_v321" {
+ title = "PCI DSS v3.2.1"
+ description = "The Payment Card Industry Data Security Standard (PCI DSS) v3.2.1 is an information security standard for entities that store, process, and/or transmit cardholder data."
+ documentation = file("./pci_dss_v321/docs/pci_dss_v321_overview.md")
+ children = [
+ benchmark.pci_dss_v321_requirement_1,
+ benchmark.pci_dss_v321_requirement_2,
+ benchmark.pci_dss_v321_requirement_3,
+ benchmark.pci_dss_v321_requirement_4,
+ benchmark.pci_dss_v321_requirement_5,
+ benchmark.pci_dss_v321_requirement_6,
+ benchmark.pci_dss_v321_requirement_7,
+ benchmark.pci_dss_v321_requirement_8,
+ benchmark.pci_dss_v321_requirement_10,
+ benchmark.pci_dss_v321_requirement_11
+ ]
+ tags = local.pci_dss_v321_common_tags
+}
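Reviewer note: one way to sanity-check that the `pci_dss_v321 = "true"` tags added throughout this PR line up with the new benchmark tree is via Steampipe's introspection tables. The sketch below assumes introspection is enabled and that the `steampipe_control` table exposes a jsonb `tags` column; both may vary by Steampipe version.

```sql
-- Hedged sketch: list controls tagged into the new PCI DSS v3.2.1 benchmark.
select
  resource_name,
  title
from
  steampipe_control
where
  tags ->> 'pci_dss_v321' = 'true'
order by
  resource_name;
```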
diff --git a/pci_dss_v321/requirement_1.sp b/pci_dss_v321/requirement_1.sp
new file mode 100644
index 00000000..ac3fa92a
--- /dev/null
+++ b/pci_dss_v321/requirement_1.sp
@@ -0,0 +1,398 @@
+locals {
+ pci_dss_v321_requirement_1_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1" {
+ title = "Requirement 1: Install and maintain a firewall configuration to protect cardholder data"
+ description = "A firewall examines all network traffic and blocks those transmissions that do not meet the specified security criteria. All systems must be protected from unauthorized access from untrusted networks."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_1,
+ benchmark.pci_dss_v321_requirement_1_2,
+ benchmark.pci_dss_v321_requirement_1_3
+ ]
+
+ tags = local.pci_dss_v321_requirement_1_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_1_1" {
+ title = "1.1 Establish and implement firewall and router configuration standards"
+ description = "Firewalls and routers are key components of the architecture that controls entry to and exit from the network. These devices are software or hardware devices that block unwanted access and manage authorized access into and out of the network. Configuration standards and procedures will help to ensure that the organization's first line of defense in the protection of its data remains strong."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_1_4
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_1_4" {
+ title = "1.1.4 Requirements for a firewall at each Internet connection and between any demilitarized zone (DMZ) and the internal network zone"
+ description = "Using a firewall on every Internet connection coming into (and out of) the network, and between any DMZ and the internal network, allows the organization to monitor and control access and minimizes the chances of a malicious individual obtaining access to the internal network via an unprotected connection."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_1_4_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.1.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_1_4_c" {
+ title = "1.1.4.c Observe network configurations to verify that a firewall is in place at each Internet connection and between any demilitarized zone (DMZ) and the internal network zone, per the documented configuration standards and network diagrams"
+ description = "Using a firewall on every Internet connection coming into (and out of) the network, and between any DMZ and the internal network, allows the organization to monitor and control access and minimizes the chances of a malicious individual obtaining access to the internal network via an unprotected connection."
+
+ children = [
+ control.apigateway_stage_use_waf_web_acl,
+ control.elb_application_lb_waf_enabled,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.1.4.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2" {
+ title = "1.2 Examine firewall and router configurations and perform the following to verify that connections are restricted between untrusted networks and system components in the cardholder data environment"
+ description = "It is essential to install network protection between the internal, trusted network and any untrusted network that is external and/or out of the entity's ability to control or manage. Failure to implement this measure correctly results in the entity being vulnerable to unauthorized access by malicious individuals or software. For firewall functionality to be effective, it must be properly configured to control and/or limit traffic into and out of the entity's network."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_2_1,
+ benchmark.pci_dss_v321_requirement_1_2_2,
+ benchmark.pci_dss_v321_requirement_1_2_3
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_1" {
+ title = "1.2.1 Restrict inbound and outbound traffic to that which is necessary for the cardholder data environment, and specifically deny all other traffic"
+ description = "Examination of all inbound and outbound connections allows for inspection and restriction of traffic based on the source and/or destination address, thus preventing unfiltered access between untrusted and trusted environments. This prevents malicious individuals from accessing the entity's network via unauthorized IP addresses or from using services, protocols, or ports in an unauthorized manner (for example, to send data they've obtained from within the entity's network out to an untrusted server). Implementing a rule that denies all inbound and outbound traffic that is not specifically needed helps to prevent inadvertent holes that would allow unintended and potentially harmful traffic in or out."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_2_1_a,
+ benchmark.pci_dss_v321_requirement_1_2_1_b,
+ benchmark.pci_dss_v321_requirement_1_2_1_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_1_a" {
+ title = "1.2.1.a Examine firewall and router configuration standards to verify that they identify inbound and outbound traffic necessary for the cardholder data environment"
+ description = "Examination of all inbound and outbound connections allows for inspection and restriction of traffic based on the source and/or destination address, thus preventing unfiltered access between untrusted and trusted environments. This prevents malicious individuals from accessing the entity's network via unauthorized IP addresses or from using services, protocols, or ports in an unauthorized manner (for example, to send data they've obtained from within the entity's network out to an untrusted server). Implementing a rule that denies all inbound and outbound traffic that is not specifically needed helps to prevent inadvertent holes that would allow unintended and potentially harmful traffic in or out."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_default_security_group_restricts_all_traffic,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_network_acl_remote_administration,
+ control.vpc_route_table_restrict_public_access_to_igw,
+ control.vpc_security_group_restrict_ingress_ssh_all,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_1_b" {
+ title = "1.2.1.b Examine firewall and router configurations to verify that inbound and outbound traffic is limited to that which is necessary for the cardholder data environment"
+ description = "Examination of all inbound and outbound connections allows for inspection and restriction of traffic based on the source and/or destination address, thus preventing unfiltered access between untrusted and trusted environments. This prevents malicious individuals from accessing the entity's network via unauthorized IP addresses or from using services, protocols, or ports in an unauthorized manner (for example, to send data they've obtained from within the entity's network out to an untrusted server). Implementing a rule that denies all inbound and outbound traffic that is not specifically needed helps to prevent inadvertent holes that would allow unintended and potentially harmful traffic in or out."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_default_security_group_restricts_all_traffic,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_network_acl_remote_administration,
+ control.vpc_route_table_restrict_public_access_to_igw,
+ control.vpc_security_group_restrict_ingress_ssh_all,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.1.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_1_c" {
+ title = "1.2.1.c Examine firewall and router configurations to verify that all other inbound and outbound traffic is specifically denied, for example by using an explicit “deny all” or an implicit deny after allow statement"
+ description = "Examination of all inbound and outbound connections allows for inspection and restriction of traffic based on the source and/or destination address, thus preventing unfiltered access between untrusted and trusted environments. This prevents malicious individuals from accessing the entity's network via unauthorized IP addresses or from using services, protocols, or ports in an unauthorized manner (for example, to send data they've obtained from within the entity's network out to an untrusted server). Implementing a rule that denies all inbound and outbound traffic that is not specifically needed helps to prevent inadvertent holes that would allow unintended and potentially harmful traffic in or out."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_default_security_group_restricts_all_traffic,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_network_acl_remote_administration,
+ control.vpc_route_table_restrict_public_access_to_igw,
+ control.vpc_security_group_restrict_ingress_ssh_all,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.1.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_2" {
+ title = "1.2.2 Secure and synchronize router configuration files"
+ description = "While the running (or active) router configuration files include the current, secure settings, the start- up files (which are used when routers are re- started or booted) must be updated with the same secure settings to ensure these settings are applied when the start-up configuration is run. Because they only run occasionally, start-up configuration files are often forgotten and are not updated. When a router re-starts and loads a start-up configuration that has not been updated with the same secure settings as those in the running configuration, it may result in weaker rules that allow malicious individuals into the network."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_2_2_b
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_2_b" {
+ title = "1.2.2.b Examine router configurations to verify they are synchronized—for example, the running (or active) configuration matches the start-up configuration (used when machines are booted)"
+ description = "While the running (or active) router configuration files include the current, secure settings, the start- up files (which are used when routers are re- started or booted) must be updated with the same secure settings to ensure these settings are applied when the start-up configuration is run. Because they only run occasionally, start-up configuration files are often forgotten and are not updated. When a router re-starts and loads a start-up configuration that has not been updated with the same secure settings as those in the running configuration, it may result in weaker rules that allow malicious individuals into the network."
+
+ children = [
+ control.cloudformation_stack_drift_detection_check
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.2.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_3" {
+ title = "1.2.3 Install perimeter firewalls between all wireless networks and the cardholder data environment, and configure these firewalls to deny or, if traffic is necessary for business purposes, permit only authorized traffic between the wireless environment and the cardholder data environment"
+ description = "The known (or unknown) implementation and exploitation of wireless technology within a network is a common path for malicious individuals to gain access to the network and cardholder data. If a wireless device or network is installed without the entity's knowledge, a malicious individual could easily and “invisibly” enter the network. If firewalls do not restrict access from wireless networks into the CDE, malicious individuals that gain unauthorized access to the wireless network can easily connect to the CDE and compromise account information. Firewalls must be installed between all wireless networks and the CDE, regardless of the purpose of the environment to which the wireless network is connected. This may include, but is not limited to, corporate networks, retail stores, guest networks, warehouse environments, etc."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_2_3_b
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_2_3_b" {
+ title = "1.2.3.b Verify that the firewalls deny or, if traffic is necessary for business purposes, permit only authorized traffic between the wireless environment and the cardholder data environment"
+ description = "The known (or unknown) implementation and exploitation of wireless technology within a network is a common path for malicious individuals to gain access to the network and cardholder data. If a wireless device or network is installed without the entity's knowledge, a malicious individual could easily and “invisibly” enter the network. If firewalls do not restrict access from wireless networks into the CDE, malicious individuals that gain unauthorized access to the wireless network can easily connect to the CDE and compromise account information. Firewalls must be installed between all wireless networks and the CDE, regardless of the purpose of the environment to which the wireless network is connected. This may include, but is not limited to, corporate networks, retail stores, guest networks, warehouse environments, etc."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_default_security_group_restricts_all_traffic,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_network_acl_remote_administration,
+ control.vpc_route_table_restrict_public_access_to_igw
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.2.3.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3" {
+ title = "1.3 Examine firewall and router configurations—including but not limited to the choke router at the Internet, the DMZ router and firewall, the DMZ cardholder segment, the perimeter router, and the internal cardholder network segment—and perform the following to determine that there is no direct access between the Internet and system components in the internal cardholder network segment"
+ description = "While there may be legitimate reasons for untrusted connections to be permitted to DMZ systems (e.g., to allow public access to a web server), such connections should never be granted to systems in the internal network. A firewall's intent is to manage and control all connections between public systems and internal systems, especially those that store, process or transmit cardholder data. If direct access is allowed between public systems and the CDE, the protections offered by the firewall are bypassed, and system components storing cardholder data may be exposed to compromise."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_1_3_1,
+ benchmark.pci_dss_v321_requirement_1_3_2,
+ benchmark.pci_dss_v321_requirement_1_3_3,
+ benchmark.pci_dss_v321_requirement_1_3_4,
+ benchmark.pci_dss_v321_requirement_1_3_5,
+ benchmark.pci_dss_v321_requirement_1_3_6,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.ec2_instance_in_vpc,
+ control.ec2_instance_not_publicly_accessible,
+ control.eks_cluster_endpoint_restrict_public_access,
+ control.emr_cluster_master_nodes_no_public_ip,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_endpoint_service_acceptance_required_enabled,
+ control.vpc_igw_attached_to_authorized_vpc
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_1" {
+ title = "1.3.1 Implement a DMZ to limit inbound traffic to only system components that provide authorized publicly accessible services, protocols, and ports"
+ description = "The DMZ is that part of the network that manages connections between the Internet (or other untrusted networks), and services that an organization needs to have available to the public (like a web server). This functionality is intended to prevent malicious individuals from accessing the organization's internal network from the Internet, or from using services, protocols, or ports in an unauthorized manner. The set of controls will examine firewall and router configurations to verify that a DMZ is implemented to limit inbound traffic to only system components that provide authorized publicly accessible services, protocols, and ports."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_security_group_restrict_ingress_ssh_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_2" {
+ title = "1.3.2 Limit inbound Internet traffic to IP addresses within the DMZ"
+ description = "The DMZ is that part of the network that manages connections between the Internet (or other untrusted networks), and services that an organization needs to have available to the public (like a web server). This functionality is intended to prevent malicious individuals from accessing the organization's internal network from the Internet, or from using services, protocols, or ports in an unauthorized manner. The set of controls will limit inbound Internet traffic to IP addresses within the DMZ."
+
+ children = [
+ control.autoscaling_launch_config_public_ip_disabled,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_network_acl_remote_administration,
+ control.vpc_route_table_restrict_public_access_to_igw,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_3" {
+ title = "1.3.3 Examine firewall and router configurations to verify that anti-spoofing measures are implemented, for example internal addresses cannot pass from the Internet into the DMZ"
+ description = "Normally a packet contains the IP address of the computer that originally sent it so other computers in the network know where the packet came from. Malicious individuals will often try to spoof (or imitate) the sending IP address so that the target system believes the packet is from a trusted source. Filtering packets coming into the network helps to, among other things, ensure packets are not “spoofed” to look like they are coming from an organization's own internal network."
+
+ children = [
+ control.autoscaling_launch_config_requires_imdsv2
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_4" {
+ title = "1.3.4 Do not allow unauthorized outbound traffic from the cardholder data environment to the Internet"
+ description = "All traffic outbound from the cardholder data environment should be evaluated to ensure that it follows established, authorized rules. Connections should be inspected to restrict traffic to only authorized communications (for example by restricting source/destination addresses/ports, and/or blocking of content). The set of controls will examine firewall and router configurations to verify that outbound traffic from the cardholder data environment to the Internet is explicitly authorized."
+
+ children = [
+ control.autoscaling_launch_config_requires_imdsv2,
+ control.dms_replication_instance_not_publicly_accessible,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_5" {
+ title = "1.3.5 Examine firewall and router configurations to verify that the firewall permits only established connections into the internal network and denies any inbound connections not associated with a previously established session"
+ description = "A firewall that maintains the `state` (or the status) for each connection through the firewall knows whether an apparent response to a previous connection is actually a valid, authorized response (since it retains each connection's status) or is malicious traffic trying to trick the firewall into allowing the connection."
+
+ children = [
+ control.vpc_default_security_group_restricts_all_traffic,
+ control.vpc_security_group_restrict_ingress_tcp_udp_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_1_3_6" {
+ title = "1.3.6 Examine firewall and router configurations to verify that system components that store cardholder data are on an internal network zone, segregated from the DMZ and other untrusted networks"
+ description = "If cardholder data is located within the DMZ, it is easier for an external attacker to access this information, since there are fewer layers to penetrate. Securing system components that store cardholder data (such as a database) in an internal network zone that is segregated from the DMZ and other untrusted networks by a firewall can prevent unauthorized network traffic from reaching the system component."
+
+ children = [
+ control.dms_replication_instance_not_publicly_accessible,
+ control.es_domain_in_vpc,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_1_common_tags, {
+ pci_dss_v321_item_id = "1.3.6"
+ })
+}
diff --git a/pci_dss_v321/requirement_10.sp b/pci_dss_v321/requirement_10.sp
new file mode 100644
index 00000000..a5d005d9
--- /dev/null
+++ b/pci_dss_v321/requirement_10.sp
@@ -0,0 +1,552 @@
+locals {
+ pci_dss_v321_requirement_10_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "10"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10" {
+ title = "Requirement 10: Track and monitor all access to network resources and cardholder data"
+ description = "Logging mechanisms and the ability to track user activities are critical in preventing, detecting, or minimizing the impact of a data compromise. The presence of logs in all environments allows thorough tracking, alerting, and analysis when something does go wrong."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_1,
+ benchmark.pci_dss_v321_requirement_10_2,
+ benchmark.pci_dss_v321_requirement_10_3,
+ benchmark.pci_dss_v321_requirement_10_5,
+ benchmark.pci_dss_v321_requirement_10_7,
+ benchmark.pci_dss_v321_requirement_10_8
+ ]
+
+ tags = local.pci_dss_v321_requirement_10_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_10_1" {
+ title = "10.1 Implement audit trails to link all access to system components to each individual user"
+ description = "It is critical to have a process or system that links user access to system components accessed. This system generates audit logs and provides the ability to trace back suspicious activity to a specific user. This control verifies, through observation and interviewing the system administrator, that: audit trails are enabled and active for system components, access to system components is linked to individual users."
+
+ children = [
+ control.apigateway_stage_logging_enabled,
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.cloudwatch_alarm_action_enabled,
+ control.cloudwatch_log_group_retention_period_365,
+ control.elb_application_classic_lb_logging_enabled,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled,
+ control.vpc_flow_logs_enabled,
+ control.waf_web_acl_logging_enabled,
+ control.wafv2_web_acl_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.1"
+ })
+}
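+
+# Illustrative sketch only (not introduced by this change): 10.1 leans on logging controls such as
+# cloudtrail_trail_enabled, which are defined elsewhere in this mod. In this mod's style such a
+# check would look roughly like the commented query below; the query name and the
+# aws_cloudtrail_trail.is_logging column are assumptions, not taken from this diff.
+#
+# query "cloudtrail_trail_enabled_sketch" {
+#   sql = <<-EOQ
+#     select
+#       arn as resource,
+#       case
+#         when is_logging then 'ok'
+#         else 'alarm'
+#       end as status,
+#       title || case when is_logging then ' logging enabled.' else ' logging disabled.' end as reason
+#     from
+#       aws_cloudtrail_trail;
+#   EOQ
+# }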
+
+benchmark "pci_dss_v321_requirement_10_2" {
+ title = "10.2 Through interviews of responsible personnel, observation of audit logs, and examination of audit log settings"
+ description = "Generating audit trails of suspect activities alerts the system administrator, sends data to other monitoring mechanisms (like intrusion-detection systems), and provides a history trail for post-incident follow-up. Through interviews of responsible personnel, observation of audit logs, and examination of audit log settings, verify that automated audit trails are implemented for all system components to reconstruct the events covered in 10.2.1 through 10.2.7."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_2_1,
+ benchmark.pci_dss_v321_requirement_10_2_2,
+ benchmark.pci_dss_v321_requirement_10_2_3,
+ benchmark.pci_dss_v321_requirement_10_2_4,
+ benchmark.pci_dss_v321_requirement_10_2_5,
+ benchmark.pci_dss_v321_requirement_10_2_6,
+ benchmark.pci_dss_v321_requirement_10_2_7,
+ control.es_domain_logs_to_cloudwatch,
+ control.opensearch_domain_audit_logging_enabled,
+ control.opensearch_domain_logs_to_cloudwatch,
+ control.redshift_cluster_audit_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_1" {
+ title = "10.2.1 All individual user accesses to cardholder data"
+ description = "Malicious individuals could obtain knowledge of a user account with access to systems in the CDE, or they could create a new, unauthorized account in order to access cardholder data. A record of all individual accesses to cardholder data can identify which accounts may have been compromised or misused. This control verifies all individual access to cardholder data is logged."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_2" {
+ title = "10.2.2 All actions taken by any individual with root or administrative privileges"
+ description = "Accounts with increased privileges, such as the “administrator” or “root” account, have the potential to greatly impact the security or operational functionality of a system. Without a log of the activities performed, an organization is unable to trace any issues resulting from an administrative mistake or misuse of privilege back to the specific action and individual. This control verifies all actions taken by any individual with root or administrative privileges are logged."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_3" {
+ title = "10.2.3 Access to all audit trails"
+ description = "Malicious users often attempt to alter audit logs to hide their actions, and a record of access allows an organization to trace any inconsistencies or potential tampering of the logs to an individual account. Having access to logs identifying changes, additions, and deletions can help retrace steps made by unauthorized personnel. This control verifies access to all audit trails is logged."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_4" {
+ title = "10.2.4 Invalid logical access attempts"
+ description = "Malicious individuals will often perform multiple access attempts on targeted systems. Multiple invalid login attempts may be an indication of an unauthorized user's attempt to “brute force” or guess a password. This control verifies invalid logical access attempts are logged."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_5" {
+ title = "10.2.5 Use of and changes to identification and authentication mechanisms—including but not limited to creation of new accounts and elevation of privileges—and all changes, additions, or deletions to accounts with root or administrative privileges"
+ description = "Without knowing who was logged on at the time of an incident, it is impossible to identify the accounts that may have been used. Additionally, malicious users may attempt to manipulate the authentication controls with the intent of bypassing them or impersonating a valid account."
+ children = [
+ benchmark.pci_dss_v321_requirement_10_2_5_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_5_a" {
+ title = "10.2.5.a Verify use of identification and authentication mechanisms is logged"
+ description = "Without knowing who was logged on at the time of an incident, it is impossible to identify the accounts that may have been used. Additionally, malicious users may attempt to manipulate the authentication controls with the intent of bypassing them or impersonating a valid account."
+ children = [
+ control.cloudtrail_trail_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.5.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_6" {
+ title = "10.2.6 Initialization, stopping, or pausing of the audit logs"
+ description = "Turning the audit logs off (or pausing them) prior to performing illicit activities is a common practice for malicious users wishing to avoid detection. Initialization of audit logs could indicate that the log function was disabled by a user to hide their actions. This control verifies the following are logged: initialization of audit logs, stopping or pausing of audit logs."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.6"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_2_7" {
+ title = "10.2.7 Creation and deletion of system-level objects"
+ description = "Malicious software, such as malware, often creates or replaces system-level objects on the target system in order to control a particular function or operation on that system. By logging when system-level objects, such as database tables or stored procedures, are created or deleted, it will be easier to determine whether such modifications were authorized. This control verifies creation and deletion of system-level objects are logged."
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.2.7"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3" {
+ title = "10.3 Record at least the following audit trail entries for all system components for each event"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. Through interviews and observation of audit logs, for each auditable event (from 10.2), perform the following:"
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_3_1,
+ benchmark.pci_dss_v321_requirement_10_3_2,
+ benchmark.pci_dss_v321_requirement_10_3_3,
+ benchmark.pci_dss_v321_requirement_10_3_4,
+ benchmark.pci_dss_v321_requirement_10_3_5,
+ benchmark.pci_dss_v321_requirement_10_3_6,
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_1" {
+ title = "10.3.1 User identification"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies user identification is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_2" {
+ title = "10.3.2 Type of event"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies the type of event is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_3" {
+ title = "10.3.3 Date and time"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies the date and time stamp is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_4" {
+ title = "10.3.4 Success or failure indication"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies success or failure indication is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_5" {
+ title = "10.3.5 Origination of event"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies origination of event is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_3_6" {
+ title = "10.3.6 Identity or name of affected data, system component, or resource"
+ description = "By recording these details for the auditable events at 10.2, a potential compromise can be quickly identified, and with sufficient detail to know who, what, where, when, and how. This control verifies the identity or name of affected data, system component, or resource is included in log entries."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.3.6"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_5" {
+ title = "10.5 Interview system administrators and examine system configurations and permissions to verify that audit trails are secured so that they cannot be altered"
+ description = "Often a malicious individual who has entered the network will attempt to edit the audit logs in order to hide their activity. Without adequate protection of audit logs, their completeness, accuracy, and integrity cannot be guaranteed, and the audit logs can be rendered useless as an investigation tool after a compromise. These controls check that audit trails are secured so they cannot be altered."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_5_2,
+ benchmark.pci_dss_v321_requirement_10_5_3,
+ benchmark.pci_dss_v321_requirement_10_5_4,
+ benchmark.pci_dss_v321_requirement_10_5_5,
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_5_2" {
+ title = "10.5.2 Current audit trail files are protected from unauthorized modifications via access control mechanisms, physical segregation, and/or network segregation"
+ description = "Adequate protection of the audit logs includes strong access control (limit access to logs based on “need to know” only), and use of physical or network segregation to make the logs harder to find and modify. Promptly backing up the logs to a centralized log server or media that is difficult to alter keeps the logs protected even if the system generating the logs becomes compromised. Protect audit trail files from unauthorized modifications."
+
+ children = [
+ control.cloudtrail_trail_validation_enabled,
+ control.config_enabled_all_regions,
+ control.s3_bucket_versioning_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.5.2"
+ })
+}
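+
+# Illustrative sketch only (not introduced by this change): 10.5.2 maps to controls such as
+# cloudtrail_trail_validation_enabled, defined elsewhere in this mod. A check of that kind would
+# look roughly like the commented query below; the query name and the
+# log_file_validation_enabled column are assumptions, not taken from this diff.
+#
+# query "cloudtrail_trail_validation_enabled_sketch" {
+#   sql = <<-EOQ
+#     select
+#       arn as resource,
+#       case
+#         when log_file_validation_enabled then 'ok'
+#         else 'alarm'
+#       end as status,
+#       title || case when log_file_validation_enabled then ' log file validation enabled.' else ' log file validation disabled.' end as reason
+#     from
+#       aws_cloudtrail_trail;
+#   EOQ
+# }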
+
+benchmark "pci_dss_v321_requirement_10_5_3" {
+ title = "10.5.3 Current audit trail files are promptly backed up to a centralized log server or media that is difficult to alter"
+ description = "Adequate protection of the audit logs includes strong access control (limit access to logs based on “need to know” only), and use of physical or network segregation to make the logs harder to find and modify. Promptly backing up the logs to a centralized log server or media that is difficult to alter keeps the logs protected even if the system generating the logs becomes compromised. Promptly back up audit trail files to a centralized log server or media that is difficult to alter."
+
+ children = [
+ control.cloudtrail_trail_integrated_with_logs,
+ control.s3_bucket_cross_region_replication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.5.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_5_4" {
+ title = "10.5.4 Logs for external-facing technologies (for example, wireless, firewalls, DNS, mail) are written onto a secure, centralized, internal log server or media"
+ description = "By writing logs from external-facing technologies such as wireless, firewalls, DNS, and mail servers, the risk of those logs being lost or altered is lowered, as they are more secure within the internal network. Logs may be written directly, or offloaded or copied from external systems, to the secure internal system or media."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_s3_data_events_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.es_domain_logs_to_cloudwatch,
+ control.rds_db_instance_logging_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.5.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_5_5" {
+ title = "10.5.5 Examine system settings, monitored files, and results from monitoring activities to verify the use of file-integrity monitoring or change-detection software on logs"
+ description = "File-integrity monitoring or change-detection systems check for changes to critical files, and notify when such changes are noted. For file-integrity monitoring purposes, an entity usually monitors files that don't regularly change, but when changed indicate a possible compromise."
+
+ children = [
+ control.cloudtrail_trail_validation_enabled,
+ control.s3_bucket_versioning_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.5.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_7" {
+ title = "10.7 Retain audit trail history for at least one year, with a minimum of three months immediately available for analysis (for example, online, archived, or restorable from backup)"
+ description = "Retaining logs for at least a year allows for the fact that it often takes a while to notice that a compromise has occurred or is occurring, and allows investigators sufficient log history to better determine the length of time of a potential breach and potential system(s) impacted. By having three months of logs immediately available, an entity can quickly identify and minimize impact of a data breach. Storing logs in off-line locations could prevent them from being readily available, resulting in longer time frames to restore log data, perform analysis, and identify impacted systems or data."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_7_a,
+ benchmark.pci_dss_v321_requirement_10_7_b,
+ benchmark.pci_dss_v321_requirement_10_7_c,
+ control.cloudwatch_log_group_retention_period_365
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.7"
+ })
+}
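+
+# Illustrative sketch only (not introduced by this change): the cloudwatch_log_group_retention_period_365
+# control referenced by 10.7 is defined elsewhere in this mod. In this mod's style it would check
+# roughly the following; the query name and the retention_in_days column (null meaning the group
+# never expires) are assumptions, not taken from this diff.
+#
+# query "cloudwatch_log_group_retention_period_365_sketch" {
+#   sql = <<-EOQ
+#     select
+#       arn as resource,
+#       case
+#         when retention_in_days is null or retention_in_days >= 365 then 'ok'
+#         else 'alarm'
+#       end as status,
+#       title || ' retention is ' || coalesce(retention_in_days::text || ' days', 'set to never expire') || '.' as reason
+#     from
+#       aws_cloudwatch_log_group;
+#   EOQ
+# }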
+
+benchmark "pci_dss_v321_requirement_10_7_a" {
+ title = "10.7.a Examine security policies and procedures to verify that they define audit log retention policies and procedures for retaining audit logs for at least one year, with a minimum of three months immediately available online"
+ description = "Retaining logs for at least a year allows for the fact that it often takes a while to notice that a compromise has occurred or is occurring, and allows investigators sufficient log history to better determine the length of time of a potential breach and potential system(s) impacted. By having three months of logs immediately available, an entity can quickly identify and minimize impact of a data breach. Storing logs in off-line locations could prevent them from being readily available, resulting in longer time frames to restore log data, perform analysis, and identify impacted systems or data."
+
+ children = [
+ control.backup_plan_min_retention_35_days,
+ control.backup_recovery_point_manual_deletion_disabled,
+ control.backup_recovery_point_min_retention_35_days,
+ control.s3_bucket_lifecycle_policy_enabled,
+ control.s3_bucket_versioning_and_lifecycle_policy_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.7.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_7_b" {
+ title = "10.7.b Interview personnel and examine audit logs to verify that audit logs are retained for at least one year"
+ description = "Retaining logs for at least a year allows for the fact that it often takes a while to notice that a compromise has occurred or is occurring, and allows investigators sufficient log history to better determine the length of time of a potential breach and potential system(s) impacted. By having three months of logs immediately available, an entity can quickly identify and minimize impact of a data breach. Storing logs in off-line locations could prevent them from being readily available, resulting in longer time frames to restore log data, perform analysis, and identify impacted systems or data."
+
+ children = [
+ control.cloudwatch_log_group_retention_period_365
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.7.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_7_c" {
+ title = "10.7.c Interview personnel and observe processes to verify that at least the last three months' logs are immediately available for analysis"
+ description = "Retaining logs for at least a year allows for the fact that it often takes a while to notice that a compromise has occurred or is occurring, and allows investigators sufficient log history to better determine the length of time of a potential breach and potential system(s) impacted. By having three months of logs immediately available, an entity can quickly identify and minimize impact of a data breach. Storing logs in off-line locations could prevent them from being readily available, resulting in longer time frames to restore log data, perform analysis, and identify impacted systems or data."
+
+ children = [
+ control.cloudwatch_log_group_retention_period_365
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.7.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_8" {
+ title = "10.8 Additional requirement for service providers only to implement a process for the timely detection and reporting of failures of critical security control systems, including but not limited to failure of firewalls, IDS/IPS, FIM, anti-virus, physical access controls, logical access controls, audit logging mechanisms and segmentation controls"
+ description = "Note: This requirement applies only when the entity being assessed is a service provider. Without formal processes to detect and alert when critical security controls fail, failures may go undetected for extended periods and provide attackers ample time to compromise systems and steal sensitive data from the cardholder data environment. The specific types of failures may vary depending on the function of the device and technology in use. Typical failures include a system ceasing to perform its security function or not functioning in its intended manner; for example, a firewall erasing all its rules or going offline."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_8_1,
+ benchmark.pci_dss_v321_requirement_10_8_b
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.8"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_8_1" {
+ title = "10.8.1 Additional requirement for service providers only: Respond to failures of any critical security controls in a timely manner"
+ description = "Processes for responding to failures in security controls must include restoring security functions, identifying and documenting the duration (date and time start to end) of the security failure, identifying and documenting cause(s) of failure, including root cause, and documenting remediation required to address root cause, identifying and addressing any security issues that arose during the failure, performing a risk assessment to determine whether further actions are required as a result of the security failure, implementing controls to prevent the cause of failure from reoccurring, and resuming monitoring of security controls. Note: This requirement applies only when the entity being assessed is a service provider. If critical security control failure alerts are not quickly and effectively responded to, attackers may use this time to insert malicious software, gain control of a system, or steal data from the entity's environment. Documented evidence (e.g., records within a problem management system) should support that processes and procedures are in place to respond to security failures. In addition, personnel should be aware of their responsibilities in the event of a failure. Actions and responses to the failure should be captured in the documented evidence."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_10_8_1_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.8.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_8_1_a" {
+ title = "10.8.1.a Examine documented policies and procedures and interview personnel to verify processes are defined and implemented to respond to a security control failure"
+ description = "This includes restoring security functions, identifying and documenting the duration (date and time start to end) of the security failure, identifying and documenting cause(s) of failure, including root cause, and documenting remediation required to address root cause, identifying and addressing any security issues that arose during the failure, performing a risk assessment to determine whether further actions are required as a result of the security failure, implementing controls to prevent the cause of failure from reoccurring, and resuming monitoring of security controls. Note: This requirement applies only when the entity being assessed is a service provider. If critical security control failure alerts are not quickly and effectively responded to, attackers may use this time to insert malicious software, gain control of a system, or steal data from the entity's environment. Documented evidence (e.g., records within a problem management system) should support that processes and procedures are in place to respond to security failures. In addition, personnel should be aware of their responsibilities in the event of a failure. Actions and responses to the failure should be captured in the documented evidence."
+
+ children = [
+ control.cloudwatch_alarm_action_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.8.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_10_8_b" {
+ title = "10.8.b Examine detection and alerting processes and interview personnel to verify that processes are implemented for all critical security controls"
+ description = "This control verifies that detection and alerting processes are implemented for all critical security controls, and that failure of a critical security control results in the generation of an alert. Note: This requirement applies only when the entity being assessed is a service provider. Without formal processes to detect and alert when critical security controls fail, failures may go undetected for extended periods and provide attackers ample time to compromise systems and steal sensitive data from the cardholder data environment. The specific types of failures may vary depending on the function of the device and technology in use. Typical failures include a system ceasing to perform its security function or not functioning in its intended manner; for example, a firewall erasing all its rules or going offline."
+
+ children = [
+ control.cloudtrail_trail_enabled,
+ control.cloudwatch_alarm_action_enabled,
+ control.es_domain_logs_to_cloudwatch,
+ control.opensearch_domain_audit_logging_enabled,
+ control.opensearch_domain_logs_to_cloudwatch,
+ control.redshift_cluster_audit_logging_enabled,
+ control.sns_topic_notification_delivery_status_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_10_common_tags, {
+ pci_dss_v321_item_id = "10.8.b"
+ })
+}
diff --git a/pci_dss_v321/requirement_11.sp b/pci_dss_v321/requirement_11.sp
new file mode 100644
index 00000000..d1e7f22d
--- /dev/null
+++ b/pci_dss_v321/requirement_11.sp
@@ -0,0 +1,112 @@
+locals {
+ pci_dss_v321_requirement_11_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "11"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11" {
+ title = "Requirement 11: Regularly test security systems and processes"
+ description = "Vulnerabilities are being discovered continually by malicious individuals and researchers, and being introduced by new software. System components, processes, and custom software should be tested frequently to ensure security controls continue to reflect a changing environment."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_11_4,
+ benchmark.pci_dss_v321_requirement_11_5
+ ]
+
+ tags = local.pci_dss_v321_requirement_11_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_11_4" {
+ title = "11.4 Use intrusion-detection and/or intrusion-prevention techniques to detect and/or prevent intrusions into the network"
+ description = "Monitor all traffic at the perimeter of the cardholder data environment as well as at critical points in the cardholder data environment, and alert personnel to suspected compromises. Keep all intrusion-detection and prevention engines, baselines, and signatures up to date. Intrusion detection and/or intrusion prevention techniques (such as IDS/IPS) compare the traffic coming into the network with known “signatures” and/or behaviors of thousands of compromise types (hacker tools, Trojans, and other malware), and send alerts and/or stop the attempt as it happens. Without a proactive approach to unauthorized activity detection, attacks on (or misuse of) computer resources could go unnoticed in real time. Security alerts generated by these techniques should be monitored so that the attempted intrusions can be stopped."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_11_4_a,
+ benchmark.pci_dss_v321_requirement_11_4_b,
+ benchmark.pci_dss_v321_requirement_11_4_c,
+ control.guardduty_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11_4_a" {
+ title = "11.4.a Examine system configurations and network diagrams to verify that techniques (such as intrusion-detection systems and/or intrusion-prevention systems) are in place to monitor all traffic at the perimeter of the cardholder data environment and at critical points in the cardholder data environment"
+ description = "Intrusion detection and/or intrusion prevention techniques (such as IDS/IPS) compare the traffic coming into the network with known “signatures” and/or behaviors of thousands of compromise types (hacker tools, Trojans, and other malware), and send alerts and/or stop the attempt as it happens. Without a proactive approach to unauthorized activity detection, attacks on (or misuse of) computer resources could go unnoticed in real time. Security alerts generated by these techniques should be monitored so that the attempted intrusions can be stopped."
+
+ children = [
+ control.guardduty_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.4.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11_4_b" {
+ title = "11.4.b Examine system configurations and interview responsible personnel to confirm intrusion-detection and/or intrusion-prevention techniques alert personnel of suspected compromises"
+ description = "Intrusion detection and/or intrusion prevention techniques (such as IDS/IPS) compare the traffic coming into the network with known “signatures” and/or behaviors of thousands of compromise types (hacker tools, Trojans, and other malware), and send alerts and/or stop the attempt as it happens. Without a proactive approach to unauthorized activity detection, attacks on (or misuse of) computer resources could go unnoticed in real time. Security alerts generated by these techniques should be monitored so that the attempted intrusions can be stopped."
+
+ children = [
+ control.guardduty_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.4.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11_4_c" {
+ title = "11.4.c Examine IDS/IPS configurations and vendor documentation to verify intrusion-detection and/or intrusion-prevention techniques are configured, maintained, and updated per vendor instructions to ensure optimal protection"
+ description = "Intrusion detection and/or intrusion prevention techniques (such as IDS/IPS) compare the traffic coming into the network with known “signatures” and/or behaviors of thousands of compromise types (hacker tools, Trojans, and other malware), and send alerts and/or stop the attempt as it happens. Without a proactive approach to unauthorized activity detection, attacks on (or misuse of) computer resources could go unnoticed in real time. Security alerts generated by these techniques should be monitored so that the attempted intrusions can be stopped."
+
+ children = [
+ control.guardduty_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.4.c"
+ })
+}
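+
+# Illustrative sketch only (not introduced by this change): 11.4 relies on guardduty_enabled,
+# defined elsewhere in this mod. A check of that kind would look roughly like the commented
+# query below; the query name, the detector_id and status columns, and the 'ENABLED' value are
+# assumptions, not taken from this diff.
+#
+# query "guardduty_enabled_sketch" {
+#   sql = <<-EOQ
+#     select
+#       detector_id as resource,
+#       case
+#         when status = 'ENABLED' then 'ok'
+#         else 'alarm'
+#       end as status,
+#       detector_id || ' GuardDuty status is ' || status || '.' as reason
+#     from
+#       aws_guardduty_detector;
+#   EOQ
+# }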
+
+benchmark "pci_dss_v321_requirement_11_5" {
+ title = "11.5 Deploy a change-detection mechanism (for example, file-integrity monitoring tools) to alert personnel to unauthorized modification (including changes, additions, and deletions) of critical system files, configuration files, or content files; and configure the software to perform critical file comparisons at least weekly"
+ description = "Change-detection solutions such as file-integrity monitoring (FIM) tools check for changes, additions, and deletions to critical files, and notify when such changes are detected. If the solution is not implemented properly and its output is not monitored, a malicious individual could add, remove, or alter configuration file contents, operating system programs, or application executables. Unauthorized changes, if undetected, could render existing security controls ineffective and/or result in cardholder data being stolen with no perceptible impact to normal processing."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_11_5_a,
+ benchmark.pci_dss_v321_requirement_11_5_b,
+ control.config_enabled_all_regions
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11_5_a" {
+ title = "11.5.a Verify the use of a change-detection mechanism by observing system settings and monitored files, as well as reviewing results from monitoring activities"
+ description = "Examples of files that should be monitored are system executables, application executables, configuration and parameter files, centrally stored, historical or archived, log and audit files, and additional critical files determined by the entity (for example, through risk assessment or other means). Change-detection solutions such as file-integrity monitoring (FIM) tools check for changes, additions, and deletions to critical files, and notify when such changes are detected. If the solution is not implemented properly and its output is not monitored, a malicious individual could add, remove, or alter configuration file contents, operating system programs, or application executables. Unauthorized changes, if undetected, could render existing security controls ineffective and/or result in cardholder data being stolen with no perceptible impact to normal processing."
+
+ children = [
+ control.config_enabled_all_regions
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.5.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_11_5_b" {
+ title = "11.5.b Verify the mechanism is configured to alert personnel to unauthorized modification (including changes, additions, and deletions) of critical files, and to perform critical file comparisons at least weekly"
+
+ children = [
+ control.config_enabled_all_regions
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_11_common_tags, {
+ pci_dss_v321_item_id = "11.5.b"
+ })
+}
diff --git a/pci_dss_v321/requirement_2.sp b/pci_dss_v321/requirement_2.sp
new file mode 100644
index 00000000..304fc5d1
--- /dev/null
+++ b/pci_dss_v321/requirement_2.sp
@@ -0,0 +1,224 @@
+locals {
+ pci_dss_v321_requirement_2_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2" {
+ title = "Requirement 2: Do not use vendor-supplied defaults for system passwords and other security parameters"
+ description = "Malicious individuals (external and internal to an entity) often use vendor default passwords and other vendor default settings to compromise systems. These passwords and settings are well known by hacker communities and are easily determined via public information."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_2_1,
+ benchmark.pci_dss_v321_requirement_2_2,
+ benchmark.pci_dss_v321_requirement_2_3,
+ benchmark.pci_dss_v321_requirement_2_4
+ ]
+
+ tags = local.pci_dss_v321_requirement_2_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_2_1" {
+ title = "2.1 Always change vendor-supplied defaults and remove or disable unnecessary default accounts before installing a system on the network"
+ description = "This applies to ALL default passwords, including but not limited to those used by operating systems, software that provides security services, application and system accounts, point-of-sale (POS) terminals, payment applications, Simple Network Management Protocol (SNMP) community strings, etc. Malicious individuals (external and internal to an organization) often use vendor default settings, account names, and passwords to compromise operating system software, applications, and the systems on which they are installed. Because these default settings are often published and are well known in hacker communities, changing these settings will leave systems less vulnerable to attack. Even if a default account is not intended to be used, changing the default password to a strong unique password and then disabling the account will prevent a malicious individual from re-enabling the account and gaining access with the default password."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_2_1_b
+ ]
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_1_b" {
+ title = "2.1.b For the sample of system components, verify that all unnecessary default accounts (including accounts used by operating systems, security software, applications, systems, POS terminals, SNMP, etc.) are removed or disabled"
+ description = "Malicious individuals (external and internal to an organization) often use vendor default settings, account names, and passwords to compromise operating system software, applications, and the systems on which they are installed. Because these default settings are often published and are well known in hacker communities, changing these settings will leave systems less vulnerable to attack. Even if a default account is not intended to be used, changing the default password to a strong unique password and then disabling the account will prevent a malicious individual from re-enabling the account and gaining access with the default password."
+
+ children = [
+ control.rds_db_cluster_no_default_admin_name,
+ control.rds_db_instance_no_default_admin_name,
+ control.redshift_cluster_no_default_admin_name
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.1.b"
+ })
+}
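+
+# Illustrative sketch only (not introduced by this change): 2.1.b maps to default-account controls
+# such as rds_db_instance_no_default_admin_name, defined elsewhere in this mod. A check of that
+# kind would look roughly like the commented query below; the query name, the master_user_name
+# column, and the list of default names are assumptions, not taken from this diff.
+#
+# query "rds_db_instance_no_default_admin_name_sketch" {
+#   sql = <<-EOQ
+#     select
+#       arn as resource,
+#       case
+#         when master_user_name in ('admin', 'postgres', 'root') then 'alarm'
+#         else 'ok'
+#       end as status,
+#       title || ' master user name is ' || master_user_name || '.' as reason
+#     from
+#       aws_rds_db_instance;
+#   EOQ
+# }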
+
+benchmark "pci_dss_v321_requirement_2_2" {
+ title = "2.2 Develop configuration standards for all system components"
+ description = "Assure that these standards address all known security vulnerabilities and are consistent with industry-accepted system hardening standards. Sources of industry-accepted system hardening standards may include, but are not limited to, the Center for Internet Security (CIS), the International Organization for Standardization (ISO), the SysAdmin Audit Network Security (SANS) Institute, and the National Institute of Standards and Technology (NIST). There are known weaknesses with many operating systems, databases, and enterprise applications, and there are also known ways to configure these systems to fix security vulnerabilities. To help those that are not security experts, a number of security organizations have established system-hardening guidelines and recommendations, which advise how to correct these weaknesses. Examples of sources for guidance on configuration standards include, but are not limited to: www.nist.gov, www.sans.org, www.cisecurity.org, www.iso.org, and product vendors. System configuration standards must be kept up to date to ensure that newly identified weaknesses are corrected prior to a system being installed on the network."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_2_2_2,
+ benchmark.pci_dss_v321_requirement_2_2_4,
+ benchmark.pci_dss_v321_requirement_2_2_5,
+ benchmark.pci_dss_v321_requirement_2_2_a,
+ benchmark.pci_dss_v321_requirement_2_2_d,
+ control.autoscaling_group_with_lb_use_health_check,
+ control.iam_root_user_no_access_keys,
+ control.s3_bucket_cross_region_replication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_2_2" {
+ title = "2.2.2 Enable only necessary services, protocols, daemons, etc., as required for the function of the system"
+ description = "As stated in Requirement 1.1.6, there are many protocols that a business may need (or have enabled by default) that are commonly used by malicious individuals to compromise a network. Including this requirement as part of an organization's configuration standards and related processes ensures that only the necessary services and protocols are enabled."
+
+ children = [
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.ec2_instance_in_vpc,
+ control.ec2_instance_not_publicly_accessible,
+ control.eks_cluster_endpoint_restrict_public_access,
+ control.emr_cluster_master_nodes_no_public_ip,
+ control.es_domain_in_vpc,
+ control.lambda_function_in_vpc,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_endpoint_service_acceptance_required_enabled,
+ control.vpc_igw_attached_to_authorized_vpc,
+ control.vpc_security_group_restrict_ingress_ssh_all
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_2_4" {
+ title = "2.2.4 Configure system security parameters to prevent misuse"
+ description = "System configuration standards and related processes should specifically address security settings and parameters that have known security implications for each type of system in use. In order for systems to be configured securely, personnel responsible for configuration and/or administering systems must be knowledgeable in the specific security parameters and settings that apply to the system."
+
+ children = [
+ control.ec2_instance_ssm_managed,
+ control.ssm_managed_instance_compliance_association_compliant,
+ control.ssm_managed_instance_compliance_patch_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.4"
+ })
+}
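+
+# Illustrative sketch only (not introduced by this change): 2.2.4 references ec2_instance_ssm_managed,
+# defined elsewhere in this mod. A check of that kind would, in this mod's style, join EC2 instances
+# against SSM managed instances roughly as below; the query name and the join columns are
+# assumptions, not taken from this diff.
+#
+# query "ec2_instance_ssm_managed_sketch" {
+#   sql = <<-EOQ
+#     select
+#       i.arn as resource,
+#       case
+#         when m.instance_id is null then 'alarm'
+#         else 'ok'
+#       end as status,
+#       i.title || case when m.instance_id is null then ' not managed by SSM.' else ' managed by SSM.' end as reason
+#     from
+#       aws_ec2_instance as i
+#       left join aws_ssm_managed_instance as m on m.instance_id = i.instance_id;
+#   EOQ
+# }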
+
+benchmark "pci_dss_v321_requirement_2_2_5" {
+ title = "2.2.5 Remove all unnecessary functionality, such as scripts, drivers, features, subsystems, file systems, and unnecessary web servers"
+ description = "Unnecessary functions can provide additional opportunities for malicious individuals to gain access to a system. By removing unnecessary functionality, organizations can focus on securing the functions that are required and reduce the risk that unknown functions will be exploited. Including this in server-hardening standards and processes addresses the specific security implications associated with unnecessary functions (for example, by removing/disabling FTP or the web server if the server will not be performing those functions)."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_2_2_5_b
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_2_5_b" {
+ title = "2.2.5.b Examine the documentation and security parameters to verify enabled functions are documented and support secure configuration"
+ description = "Unnecessary functions can provide additional opportunities for malicious individuals to gain access to a system. By removing unnecessary functionality, organizations can focus on securing the functions that are required and reduce the risk that unknown functions will be exploited. Including this in server-hardening standards and processes addresses the specific security implications associated with unnecessary functions (for example, by removing/disabling FTP or the web server if the server will not be performing those functions)."
+
+ children = [
+ control.ec2_instance_ssm_managed,
+ control.ssm_managed_instance_compliance_association_compliant,
+ control.ssm_managed_instance_compliance_patch_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.5.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_2_a" {
+ title = "2.2.a Examine the organization's system configuration standards for all types of system components and verify the system configuration standards are consistent with industry-accepted hardening standards"
+ description = "There are known weaknesses with many operating systems, databases, and enterprise applications, and there are also known ways to configure these systems to fix security vulnerabilities. To help those that are not security experts, a number of security organizations have established system-hardening guidelines and recommendations, which advise how to correct these weaknesses. Examples of sources for guidance on configuration standards include, but are not limited to: www.nist.gov, www.sans.org, www.cisecurity.org, www.iso.org, and product vendors. System configuration standards must be kept up to date to ensure that newly identified weaknesses are corrected prior to a system being installed on the network."
+
+ children = [
+ control.autoscaling_launch_config_requires_imdsv2,
+ control.cloudformation_stack_drift_detection_check,
+ control.rds_db_cluster_no_default_admin_name,
+ control.rds_db_instance_no_default_admin_name,
+ control.redshift_cluster_no_default_admin_name
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_2_d" {
+ title = "2.2.d Verify that system configuration standards include procedures such as changing all vendor-supplied defaults and eliminating unnecessary default accounts for all types of system components"
+ description = "System configuration standards include the following procedures for all types of system components: changing of all vendor-supplied defaults and elimination of unnecessary default accounts, implementing only one primary function per server to prevent functions that require different security levels from co-existing on the same server, enabling only necessary services, protocols, daemons, etc., as required for the function of the system, implementing additional security features for any required services, protocols or daemons that are considered to be insecure, configuring system security parameters to prevent misuse, and removing all unnecessary functionality, such as scripts, drivers, features, subsystems, file systems, and unnecessary web servers. There are known weaknesses with many operating systems, databases, and enterprise applications, and there are also known ways to configure these systems to fix security vulnerabilities. To help those that are not security experts, a number of security organizations have established system-hardening guidelines and recommendations, which advise how to correct these weaknesses. Examples of sources for guidance on configuration standards include, but are not limited to: www.nist.gov, www.sans.org, www.cisecurity.org, www.iso.org, and product vendors. System configuration standards must be kept up to date to ensure that newly identified weaknesses are corrected prior to a system being installed on the network."
+
+ children = [
+ control.autoscaling_launch_config_requires_imdsv2,
+ control.cloudformation_stack_drift_detection_check,
+ control.rds_db_cluster_no_default_admin_name,
+ control.rds_db_instance_no_default_admin_name,
+ control.redshift_cluster_no_default_admin_name
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.2.d"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_3" {
+ title = "2.3 Encrypt all non-console administrative access using strong cryptography"
+ description = "If non-console (including remote) administration does not use secure authentication and encrypted communications, sensitive administrative or operational level information (like administrator's IDs and passwords) can be revealed to an eavesdropper. A malicious individual could use this information to access the network, become administrator, and steal data. Clear-text protocols (such as HTTP, telnet, etc.) do not encrypt traffic or logon details, making it easy for an eavesdropper to intercept this information. Select a sample of system components and verify that non-console administrative access is encrypted."
+
+ children = [
+ control.acm_certificate_expires_30_days,
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.cloudfront_distribution_no_deprecated_ssl_protocol,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_application_network_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_tls_https_listeners,
+ control.redshift_cluster_encryption_in_transit_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.3"
+ })
+}
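+
+# Illustrative sketch only (not introduced by this change): 2.3 references acm_certificate_expires_30_days,
+# defined elsewhere in this mod. A check of that kind would look roughly like the commented query
+# below; the query name and the certificate_arn and not_after columns are assumptions, not taken
+# from this diff.
+#
+# query "acm_certificate_expires_30_days_sketch" {
+#   sql = <<-EOQ
+#     select
+#       certificate_arn as resource,
+#       case
+#         when not_after is null then 'info'
+#         when not_after > current_timestamp + interval '30 days' then 'ok'
+#         else 'alarm'
+#       end as status,
+#       title || ' expires ' || coalesce(to_char(not_after, 'YYYY-MM-DD'), 'at an unknown date') || '.' as reason
+#     from
+#       aws_acm_certificate;
+#   EOQ
+# }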
+
+benchmark "pci_dss_v321_requirement_2_4" {
+ title = "2.4 Maintain an inventory of system components that are in scope for PCI DSS"
+ description = "Maintaining a current list of all system components will enable an organization to accurately and efficiently define the scope of their environment for implementing PCI DSS controls. Without an inventory, some system components could be forgotten, and be inadvertently excluded from the organization's configuration standards."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_2_4_a,
+ control.config_enabled_all_regions,
+ control.ec2_instance_ssm_managed,
+ control.ssm_managed_instance_compliance_association_compliant,
+ control.vpc_eip_associated
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_2_4_a" {
+ title = "2.4.a Examine system inventory to verify that a list of hardware and software components is maintained and includes a description of function/use for each"
+ description = "Maintaining a current list of all system components will enable an organization to accurately and efficiently define the scope of their environment for implementing PCI DSS controls. Without an inventory, some system components could be forgotten, and be inadvertently excluded from the organization's configuration standards."
+
+ children = [
+ control.config_enabled_all_regions
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_2_common_tags, {
+ pci_dss_v321_item_id = "2.4.a"
+ })
+}
diff --git a/pci_dss_v321/requirement_3.sp b/pci_dss_v321/requirement_3.sp
new file mode 100644
index 00000000..fa51203a
--- /dev/null
+++ b/pci_dss_v321/requirement_3.sp
@@ -0,0 +1,416 @@
+locals {
+ pci_dss_v321_requirement_3_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3" {
+ title = "Requirement 3: Protect stored cardholder data"
+ description = "Protection methods such as encryption, truncation, masking, and hashing are critical components of cardholder data protection. If an intruder circumvents other security controls and gains access to encrypted data, without the proper cryptographic keys, the data is unreadable and unusable to that person."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_1,
+ benchmark.pci_dss_v321_requirement_3_2,
+ benchmark.pci_dss_v321_requirement_3_4,
+ benchmark.pci_dss_v321_requirement_3_5,
+ benchmark.pci_dss_v321_requirement_3_6
+ ]
+
+ tags = local.pci_dss_v321_requirement_3_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_3_1" {
+ title = "3.1 Keep cardholder data storage to a minimum by implementing data retention and disposal policies, procedures and processes"
+ description = "Procedures and processes should include at least the following for all cardholder data (CHD) storage: limiting data storage amount and retention time to that which is required for legal, regulatory, and/or business requirements, specific retention requirements for cardholder data, processes for secure deletion of data when no longer needed, a quarterly process for identifying and securely deleting stored cardholder data that exceeds defined retention. Identifying and deleting stored data that has exceeded its specified retention period prevents unnecessary retention of data that is no longer needed. This process may be automated or manual or a combination of both. For example, a programmatic procedure (automatic or manual) to locate and remove data and/or a manual review of data storage areas could be performed. Implementing secure deletion methods ensures that the data cannot be retrieved when it is no longer needed. Remember, if you don't need it, don't store it!"
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_1_a,
+ benchmark.pci_dss_v321_requirement_3_1_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_1_a" {
+ title = "3.1.a Examine the data retention and disposal policies, procedures and processes to verify they satisfy all the requirements for cardholder data (CHD) storage"
+ description = "Procedures and processes should include the following for all cardholder data (CHD) storage: limiting data storage amount and retention time to that which is required for legal, regulatory, and/or business requirements, specific requirements for retention of cardholder data (for example, cardholder data needs to be held for X period for Y business reasons), processes for secure deletion of cardholder data when no longer needed for legal, regulatory, or business reasons, and a quarterly process for identifying and securely deleting stored cardholder data that exceeds defined retention requirements. Identifying and deleting stored data that has exceeded its specified retention period prevents unnecessary retention of data that is no longer needed. This process may be automated or manual or a combination of both. For example, a programmatic procedure (automatic or manual) to locate and remove data and/or a manual review of data storage areas could be performed. Implementing secure deletion methods ensures that the data cannot be retrieved when it is no longer needed. Remember, if you don't need it, don't store it!"
+
+ children = [
+ control.s3_bucket_lifecycle_policy_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_1_c" {
+ title = "3.1.c For a sample of system components that store cardholder data examine files and system records to verify that the data stored does not exceed the requirements defined in the data retention policy and observe the deletion mechanism to verify data is deleted securely"
+ description = "Identifying and deleting stored data that has exceeded its specified retention period prevents unnecessary retention of data that is no longer needed. This process may be automated or manual or a combination of both. For example, a programmatic procedure (automatic or manual) to locate and remove data and/or a manual review of data storage areas could be performed. Implementing secure deletion methods ensures that the data cannot be retrieved when it is no longer needed. Remember, if you don't need it, don't store it!"
+
+ children = [
+ control.backup_plan_min_retention_35_days,
+ control.backup_recovery_point_encryption_enabled,
+ control.backup_recovery_point_manual_deletion_disabled,
+ control.backup_recovery_point_min_retention_35_days,
+ control.dynamodb_table_in_backup_plan,
+ control.dynamodb_table_point_in_time_recovery_enabled,
+ control.dynamodb_table_protected_by_backup_plan,
+ control.ebs_volume_in_backup_plan,
+ control.ebs_volume_protected_by_backup_plan,
+ control.ec2_instance_ebs_optimized,
+ control.ec2_instance_protected_by_backup_plan,
+ control.efs_file_system_in_backup_plan,
+ control.efs_file_system_protected_by_backup_plan,
+ control.elasticache_redis_cluster_automatic_backup_retention_15_days,
+ control.fsx_file_system_protected_by_backup_plan,
+ control.rds_db_cluster_aurora_protected_by_backup_plan,
+ control.rds_db_instance_backup_enabled,
+ control.rds_db_instance_in_backup_plan,
+ control.rds_db_instance_protected_by_backup_plan,
+ control.redshift_cluster_automatic_snapshots_min_7_days,
+ control.s3_bucket_cross_region_replication_enabled,
+ control.s3_bucket_versioning_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.1.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_2" {
+ title = "3.2 Do not store sensitive authentication data after authorization (even if encrypted)"
+ description = "If sensitive authentication data is received, render all data unrecoverable upon completion of the authorization process. It is permissible for issuers and companies that support issuing services to store sensitive authentication data if there is a business justification and the data is stored securely. Sensitive authentication data includes the data as cited in the following Requirements 3.2.1 through 3.2.3. Sensitive authentication data consists of full track data, card validation code or value, and PIN data. Storage of sensitive authentication data after authorization is prohibited! This data is very valuable to malicious individuals as it allows them to generate counterfeit payment cards and create fraudulent transactions. Entities that issue payment cards or that perform or support issuing services will often create and control sensitive authentication data as part of the issuing function. It is allowable for companies that perform, facilitate, or support issuing services to store sensitive authentication data ONLY IF they have a legitimate business need to store such data. It should be noted that all PCI DSS requirements apply to issuers, and the only exception for issuers and issuer processors is that sensitive authentication data may be retained if there is a legitimate reason to do so. A legitimate reason is one that is necessary for the performance of the function being provided for the issuer and not one of convenience. Any such data must be stored securely and in accordance with all PCI DSS and specific payment brand requirements."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_2_3,
+ benchmark.pci_dss_v321_requirement_3_2_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_2_3" {
+ title = "3.2.3 Do not store the personal identification number (PIN) or the encrypted PIN block after authorization"
+ description = "These values should be known only to the card owner or bank that issued the card. If this data is stolen, malicious individuals can execute fraudulent PIN-based debit transactions (for example, ATM withdrawals). For a sample of system components, examine data sources, including but not limited to the following, and verify that PINs and encrypted PIN blocks are not stored after authorization: incoming transaction data, all logs (for example, transaction, history, debugging, error), history files, trace files, several database schemas, database contents."
+
+ children = [
+ control.apigateway_stage_logging_enabled,
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.elb_application_classic_lb_logging_enabled,
+ control.rds_db_instance_logging_enabled,
+ control.waf_web_acl_logging_enabled,
+ control.wafv2_web_acl_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.2.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_2_c" {
+ title = "3.2.c For all other entities, if sensitive authentication data is received, review policies and procedures, and examine system configurations to verify the data is not retained after authorization"
+ description = "Sensitive authentication data consists of full track data, card validation code or value, and PIN data. Storage of sensitive authentication data after authorization is prohibited! This data is very valuable to malicious individuals as it allows them to generate counterfeit payment cards and create fraudulent transactions. Entities that issue payment cards or that perform or support issuing services will often create and control sensitive authentication data as part of the issuing function. It is allowable for companies that perform, facilitate, or support issuing services to store sensitive authentication data ONLY IF they have a legitimate business need to store such data. It should be noted that all PCI DSS requirements apply to issuers, and the only exception for issuers and issuer processors is that sensitive authentication data may be retained if there is a legitimate reason to do so. A legitimate reason is one that is necessary for the performance of the function being provided for the issuer and not one of convenience. Any such data must be stored securely and in accordance with all PCI DSS and specific payment brand requirements."
+
+ children = [
+ control.s3_bucket_lifecycle_policy_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.2.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4" {
+ title = "3.4 Render PAN unreadable anywhere it is stored (including on portable digital media, backup media, and in logs) by using approaches like one-way hashes based on strong cryptography, truncation, etc."
+ description = "The following approaches should be used to render PAN unreadable anywhere it is stored: One-way hashes based on strong cryptography (hash must be of the entire PAN), truncation (hashing cannot be used to replace the truncated segment of PAN), index tokens and pads (pads must be securely stored) and strong cryptography with associated key-management processes and procedures. Note: It is a relatively trivial effort for a malicious individual to reconstruct original PAN data if they have access to both the truncated and hashed version of a PAN. Where hashed and truncated versions of the same PAN are present in an entity's environment, additional controls must be in place to ensure that the hashed and truncated versions cannot be correlated to reconstruct the original PAN. PANs stored in primary storage (databases, or flat files such as text files, spreadsheets) as well as non-primary storage (backup, audit logs, exception or troubleshooting logs) must all be protected. One-way hash functions based on strong cryptography can be used to render cardholder data unreadable. Hash functions are appropriate when there is no need to retrieve the original number (one-way hashes are irreversible). It is recommended, but not currently a requirement, that an additional, random input value be added to the cardholder data prior to hashing to reduce the feasibility of an attacker comparing the data against (and deriving the PAN from) tables of pre-computed hash values. The intent of truncation is to permanently remove a segment of PAN data so that only a portion (generally not to exceed the first six and last four digits) of the PAN is stored. An index token is a cryptographic token that replaces the PAN based on a given index for an unpredictable value. A one-time pad is a system in which a randomly generated private key is used only once to encrypt a message that is then decrypted using a matching one-time pad and key. The intent of strong cryptography (as defined in the PCI DSS and PA-DSS Glossary of Terms, Abbreviations, and Acronyms) is that the encryption be based on an industry-tested and accepted algorithm (not a proprietary or `home-grown` algorithm) with strong cryptographic keys. By correlating hashed and truncated versions of a given PAN, a malicious individual may easily derive the original PAN value. Controls that prevent the correlation of this data will help ensure that the original PAN remains unreadable."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_4_1,
+ benchmark.pci_dss_v321_requirement_3_4_a,
+ benchmark.pci_dss_v321_requirement_3_4_b,
+ benchmark.pci_dss_v321_requirement_3_4_d,
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_1" {
+ title = "3.4.1 If disk encryption is used (rather than file- or column-level database encryption), logical access must be managed separately and independently of native operating system authentication and access control mechanisms (for example, by not using local user account databases or general network login credentials)"
+ description = "Decryption keys must not be associated with user accounts. Note: This requirement applies in addition to all other PCI DSS encryption and key-management requirements. PANs stored in primary storage (databases, or flat files such as text files, spreadsheets) as well as non-primary storage (backup, audit logs, exception or troubleshooting logs) must all be protected. One-way hash functions based on strong cryptography can be used to render cardholder data unreadable. Hash functions are appropriate when there is no need to retrieve the original number (one-way hashes are irreversible). It is recommended, but not currently a requirement, that an additional, random input value be added to the cardholder data prior to hashing to reduce the feasibility of an attacker comparing the data against (and deriving the PAN from) tables of pre-computed hash values. The intent of truncation is to permanently remove a segment of PAN data so that only a portion (generally not to exceed the first six and last four digits) of the PAN is stored. An index token is a cryptographic token that replaces the PAN based on a given index for an unpredictable value. A one-time pad is a system in which a randomly generated private key is used only once to encrypt a message that is then decrypted using a matching one-time pad and key. The intent of strong cryptography (as defined in the PCI DSS and PA-DSS Glossary of Terms, Abbreviations, and Acronyms) is that the encryption be based on an industry-tested and accepted algorithm (not a proprietary or `home-grown` algorithm) with strong cryptographic keys. By correlating hashed and truncated versions of a given PAN, a malicious individual may easily derive the original PAN value. Controls that prevent the correlation of this data will help ensure that the original PAN remains unreadable."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_4_1_a,
+ benchmark.pci_dss_v321_requirement_3_4_1_c,
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_1_a" {
+ title = "3.4.1.a If disk encryption is used, inspect the configuration and observe the authentication process to verify that logical access to encrypted file systems is implemented via a mechanism that is separate from the native operating system's authentication mechanism (for example, not using local user account databases or general network login credentials)"
+ description = "The intent of this requirement is to address the acceptability of disk-level encryption for rendering cardholder data unreadable. Disk-level encryption encrypts the entire disk/partition on a computer and automatically decrypts the information when an authorized user requests it. Many disk-encryption solutions intercept operating system read/write operations and carry out the appropriate cryptographic transformations without any special action by the user other than supplying a password or pass phrase upon system startup or at the beginning of a session. Based on these characteristics of disk-level encryption, to be compliant with this requirement, the method cannot: 1) Use the same user account authenticator as the operating system, or 2) Use a decryption key that is associated with or derived from the system's local user account database or general network login credentials. Full disk encryption helps to protect data in the event of physical loss of a disk and therefore may be appropriate for portable devices that store cardholder data."
+
+ children = [
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_1_c" {
+ title = "3.4.1.c Examine the configurations and observe the processes to verify that cardholder data on removable media is encrypted wherever stored"
+ description = "Note: If disk encryption is not used to encrypt removable media, the data stored on this media will need to be rendered unreadable through some other method. The intent of this requirement is to address the acceptability of disk-level encryption for rendering cardholder data unreadable. Disk-level encryption encrypts the entire disk/partition on a computer and automatically decrypts the information when an authorized user requests it. Many disk-encryption solutions intercept operating system read/write operations and carry out the appropriate cryptographic transformations without any special action by the user other than supplying a password or pass phrase upon system startup or at the beginning of a session. Based on these characteristics of disk-level encryption, to be compliant with this requirement, the method cannot: 1) Use the same user account authenticator as the operating system, or 2) Use a decryption key that is associated with or derived from the system's local user account database or general network login credentials. Full disk encryption helps to protect data in the event of physical loss of a disk and therefore may be appropriate for portable devices that store cardholder data."
+
+ children = [
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.1.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_a" {
+ title = "3.4.a Examine documentation about the system used to protect the PAN, including the vendor, type of system/process, and the encryption algorithms (if applicable) to verify that the PAN is rendered unreadable using methods like truncation, one-way hashes based on strong cryptography, etc."
+ description = "Verify documentation about the system used to protect the PAN, including the vendor, type of system/process, and the encryption algorithms (if applicable) to verify that the PAN is rendered unreadable using any of the following methods: One-way hashes based on strong cryptography, truncation, index tokens and pads with the pads being securely stored, strong cryptography, with associated key-management processes and procedures. PANs stored in primary storage (databases, or flat files such as text files, spreadsheets) as well as non-primary storage (backup, audit logs, exception or troubleshooting logs) must all be protected. One-way hash functions based on strong cryptography can be used to render cardholder data unreadable. Hash functions are appropriate when there is no need to retrieve the original number (one-way hashes are irreversible). It is recommended, but not currently a requirement, that an additional, random input value be added to the cardholder data prior to hashing to reduce the feasibility of an attacker comparing the data against (and deriving the PAN from) tables of pre-computed hash values. The intent of truncation is to permanently remove a segment of PAN data so that only a portion (generally not to exceed the first six and last four digits) of the PAN is stored. An index token is a cryptographic token that replaces the PAN based on a given index for an unpredictable value. A one-time pad is a system in which a randomly generated private key is used only once to encrypt a message that is then decrypted using a matching one-time pad and key. The intent of strong cryptography (as defined in the PCI DSS and PA-DSS Glossary of Terms, Abbreviations, and Acronyms) is that the encryption be based on an industry-tested and accepted algorithm (not a proprietary or `home-grown` algorithm) with strong cryptographic keys. By correlating hashed and truncated versions of a given PAN, a malicious individual may easily derive the original PAN value. Controls that prevent the correlation of this data will help ensure that the original PAN remains unreadable."
+
+ children = [
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.backup_recovery_point_encryption_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.opensearch_domain_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_b" {
+ title = "3.4.b Examine several tables or files from a sample of data repositories to verify the PAN is rendered unreadable (that is, not stored in plain-text)"
+ description = "PANs stored in primary storage (databases, or flat files such as text files, spreadsheets) as well as non-primary storage (backup, audit logs, exception or troubleshooting logs) must all be protected. One-way hash functions based on strong cryptography can be used to render cardholder data unreadable. Hash functions are appropriate when there is no need to retrieve the original number (one-way hashes are irreversible). It is recommended, but not currently a requirement, that an additional, random input value be added to the cardholder data prior to hashing to reduce the feasibility of an attacker comparing the data against (and deriving the PAN from) tables of pre-computed hash values. The intent of truncation is to permanently remove a segment of PAN data so that only a portion (generally not to exceed the first six and last four digits) of the PAN is stored. An index token is a cryptographic token that replaces the PAN based on a given index for an unpredictable value. A one-time pad is a system in which a randomly generated private key is used only once to encrypt a message that is then decrypted using a matching one-time pad and key. The intent of strong cryptography (as defined in the PCI DSS and PA-DSS Glossary of Terms, Abbreviations, and Acronyms) is that the encryption be based on an industry-tested and accepted algorithm (not a proprietary or `home-grown` algorithm) with strong cryptographic keys. By correlating hashed and truncated versions of a given PAN, a malicious individual may easily derive the original PAN value. Controls that prevent the correlation of this data will help ensure that the original PAN remains unreadable."
+
+ children = [
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.backup_recovery_point_encryption_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.es_domain_encryption_at_rest_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.opensearch_domain_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_4_d" {
+ title = "3.4.d Examine a sample of audit logs, including payment application logs, to confirm that PAN is rendered unreadable or is not present in the logs"
+ description = "PANs stored in primary storage (databases, or flat files such as text files, spreadsheets) as well as non-primary storage (backup, audit logs, exception or troubleshooting logs) must all be protected. One-way hash functions based on strong cryptography can be used to render cardholder data unreadable. Hash functions are appropriate when there is no need to retrieve the original number (one-way hashes are irreversible). It is recommended, but not currently a requirement, that an additional, random input value be added to the cardholder data prior to hashing to reduce the feasibility of an attacker comparing the data against (and deriving the PAN from) tables of pre-computed hash values. The intent of truncation is to permanently remove a segment of PAN data so that only a portion (generally not to exceed the first six and last four digits) of the PAN is stored. An index token is a cryptographic token that replaces the PAN based on a given index for an unpredictable value. A one-time pad is a system in which a randomly generated private key is used only once to encrypt a message that is then decrypted using a matching one-time pad and key. The intent of strong cryptography (as defined in the PCI DSS and PA-DSS Glossary of Terms, Abbreviations, and Acronyms) is that the encryption be based on an industry-tested and accepted algorithm (not a proprietary or `home-grown` algorithm) with strong cryptographic keys. By correlating hashed and truncated versions of a given PAN, a malicious individual may easily derive the original PAN value. Controls that prevent the correlation of this data will help ensure that the original PAN remains unreadable."
+
+ children = [
+ control.apigateway_stage_cache_encryption_at_rest_enabled,
+ control.cloudtrail_multi_region_trail_enabled,
+ control.cloudtrail_trail_enabled,
+ control.cloudtrail_trail_integrated_with_logs,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.elb_application_classic_lb_logging_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.waf_web_acl_logging_enabled,
+ control.wafv2_web_acl_logging_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.4.d"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_5" {
+ title = "3.5 Document and implement procedures to protect keys used to secure stored cardholder data against disclosure and misuse"
+ description = "Note: This requirement applies to keys used to encrypt stored cardholder data, and also applies to key-encrypting keys used to protect data-encrypting keys—such key-encrypting keys must be at least as strong as the data-encrypting key. Cryptographic keys must be strongly protected because those who obtain access will be able to decrypt data. Key-encrypting keys, if used, must be at least as strong as the data-encrypting key in order to ensure proper protection of the key that encrypts the data as well as the data encrypted with that key. The requirement to protect keys from disclosure and misuse applies to both data-encrypting keys and key-encrypting keys. Because one key-encrypting key may grant access to many data-encrypting keys, the key-encrypting keys require strong protection measures."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_5_2
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_5_2" {
+ title = "3.5.2 Restrict access to cryptographic keys to the fewest number of custodians necessary"
+ description = "There should be very few who have access to cryptographic keys (reducing the potential for rendering cardholder data visible by unauthorized parties), usually only those who have key custodian responsibilities."
+
+ children = [
+ control.iam_policy_custom_no_blocked_kms_actions,
+ control.iam_policy_inline_no_blocked_kms_actions
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.5.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_6" {
+ title = "3.6 Fully document and implement all key-management processes and procedures for cryptographic keys used for encryption of cardholder data"
+ description = "Note: Numerous industry standards for key management are available from various resources including NIST, which can be found at http://csrc.nist.gov. The manner in which cryptographic keys are managed is a critical part of the continued security of the encryption solution. A good key-management process, whether it is manual or automated as part of the encryption product, is based on industry standards and addresses all key elements at 3.6.1 through 3.6.8. Providing guidance to customers on how to securely transmit, store and update cryptographic keys can help prevent keys from being mismanaged or disclosed to unauthorized entities. This requirement applies to keys used to encrypt stored cardholder data, and any respective key-encrypting keys. Note: Testing Procedure 3.6.a is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_6_4
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.6"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_6_4" {
+ title = "3.6.4 Cryptographic key changes for keys that have reached the end of their cryptoperiod (for example, after a defined period of time has passed and/or after a certain amount of cipher-text has been produced by a given key), as defined by the associated application vendor or key owner, and based on industry best practices and guidelines"
+ description = "A cryptoperiod is the time span during which a particular cryptographic key can be used for its defined purpose. Considerations for defining the cryptoperiod include, but are not limited to, the strength of the underlying algorithm, size or length of the key, risk of key compromise, and the sensitivity of the data being encrypted. Periodic changing of encryption keys when the keys have reached the end of their cryptoperiod is imperative to minimize the risk of someone's obtaining the encryption keys, and using them to decrypt data."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_3_6_4_a,
+ control.kms_key_decryption_restricted_in_iam_customer_managed_policy,
+ control.kms_key_decryption_restricted_in_iam_inline_policy
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.6.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_3_6_4_a" {
+ title = "3.6.4.a Verify that key-management procedures include a defined cryptoperiod for each key type in use and define a process for key changes at the end of the defined cryptoperiod(s)"
+ description = "A cryptoperiod is the time span during which a particular cryptographic key can be used for its defined purpose. Considerations for defining the cryptoperiod include, but are not limited to, the strength of the underlying algorithm, size or length of the key, risk of key compromise, and the sensitivity of the data being encrypted. Periodic changing of encryption keys when the keys have reached the end of their cryptoperiod is imperative to minimize the risk of someone’s obtaining the encryption keys, and using them to decrypt data."
+ children = [
+ control.kms_key_decryption_restricted_in_iam_customer_managed_policy,
+ control.kms_key_decryption_restricted_in_iam_inline_policy
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_3_common_tags, {
+ pci_dss_v321_item_id = "3.6.4.a"
+ })
+}
diff --git a/pci_dss_v321/requirement_4.sp b/pci_dss_v321/requirement_4.sp
new file mode 100644
index 00000000..8fdbc3a0
--- /dev/null
+++ b/pci_dss_v321/requirement_4.sp
@@ -0,0 +1,133 @@
+locals {
+ pci_dss_v321_requirement_4_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4" {
+ title = "Requirement 4: Encrypt transmission of cardholder data across open, public networks"
+ description = "Sensitive information must be encrypted during transmission over networks that are easily accessed by malicious individuals. Misconfigured wireless networks and vulnerabilities in legacy encryption and authentication protocols continue to be targets of malicious individuals who exploit these vulnerabilities to gain privileged access to cardholder data environments."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_4_1
+ ]
+
+ tags = local.pci_dss_v321_requirement_4_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_4_1" {
+ title = "4.1 Use strong cryptography and security protocols to safeguard sensitive cardholder data during transmission over open, public networks"
+ description = "The following should be used to safeguard sensitive cardholder data during transmission over open, public networks: only trusted keys and certificates are accepted, the protocol in use only supports secure versions or configurations and the encryption strength is appropriate for the encryption methodology in use. Examples of open, public networks include but are not limited to the Internet, wireless technologies, including 802.11 and Bluetooth, cellular technologies, for example, Global System for Mobile communications (GSM), Code division multiple access (CDMA), General Packet Radio Service (GPRS) and satellite communications. Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+ children = [
+ benchmark.pci_dss_v321_requirement_4_1_a,
+ benchmark.pci_dss_v321_requirement_4_1_d,
+ benchmark.pci_dss_v321_requirement_4_1_e,
+ benchmark.pci_dss_v321_requirement_4_1_f,
+ benchmark.pci_dss_v321_requirement_4_1_g,
+ control.acm_certificate_expires_30_days,
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_listener_use_secure_ssl_cipher,
+ control.es_domain_node_to_node_encryption_enabled,
+ control.redshift_cluster_encryption_in_transit_enabled,
+ control.s3_bucket_enforces_ssl
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4_1_a" {
+ title = "4.1.a Identify all locations where cardholder data is transmitted or received over open, public networks"
+ description = "Examine documented standards and compare to system configurations to verify the use of security protocols and strong cryptography for all locations. Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+
+ children = [
+ control.acm_certificate_expires_30_days,
+ control.cloudfront_distribution_custom_origins_encryption_in_transit_enabled,
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.cloudfront_distribution_no_deprecated_ssl_protocol,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_application_network_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_tls_https_listeners,
+ control.es_domain_node_to_node_encryption_enabled,
+ control.kinesis_stream_server_side_encryption_enabled,
+ control.opensearch_domain_https_required,
+ control.redshift_cluster_encryption_in_transit_enabled,
+ control.s3_bucket_enforces_ssl
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4_1_d" {
+ title = "4.1.d Examine keys and certificates to verify that only trusted keys and/or certificates are accepted"
+ description = "Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+
+ children = [
+ control.acm_certificate_expires_30_days,
+ control.elb_classic_lb_use_ssl_certificate
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1.d"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4_1_e" {
+ title = "4.1.e Examine system configurations to verify that the protocol is implemented to use only secure configurations and does not support insecure versions or configurations"
+ description = "Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+
+ children = [
+ control.cloudfront_distribution_no_deprecated_ssl_protocol,
+ control.vpc_flow_logs_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1.e"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4_1_f" {
+ title = "4.1.f Examine system configurations to verify that the proper encryption strength is implemented for the encryption methodology in use"
+ description = "Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+
+ children = [
+ control.cloudfront_distribution_no_deprecated_ssl_protocol,
+ control.vpc_flow_logs_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1.f"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_4_1_g" {
+ title = "4.1.g For TLS implementations, examine system configurations to verify that TLS is enabled whenever cardholder data is transmitted or received"
+ description = "For example, for browser-based implementations “HTTPS” appears as the browser Universal Record Locator (URL) protocol, and cardholder data is only requested if “HTTPS” appears as part of the URL. Sensitive information must be encrypted during transmission over public networks, because it is easy and common for a malicious individual to intercept and/or divert data while in transit. Secure transmission of cardholder data requires using trusted keys/certificates, a secure protocol for transport, and proper encryption strength to encrypt cardholder data. Connection requests from systems that do not support the required encryption strength, and that would result in an insecure connection, should not be accepted. Note that some protocol implementations (such as SSL, SSH v1.0, and early TLS) have known vulnerabilities that an attacker can use to gain control of the affected system. Whichever security protocol is used, ensure it is configured to use only secure versions and configurations to prevent use of an insecure connection—for example, by using only trusted certificates and supporting only strong encryption (not supporting weaker, insecure protocols or methods). Verifying that certificates are trusted (for example, have not expired and are issued from a trusted source) helps ensure the integrity of the secure connection. Generally, the web page URL should begin with `HTTPS` and/or the web browser display a padlock icon somewhere in the window of the browser. Many TLS certificate vendors also provide a highly visible verification seal— sometimes referred to as a “security seal,” `secure site seal,` or “secure trust seal”)—which may provide the ability to click on the seal to reveal information about the website. Refer to industry standards and best practices for information on strong cryptography and secure protocols (e.g., NIST SP 800-52 and SP 800-57, OWASP, etc.) Note: SSL/early TLS is not considered strong cryptography and may not be used as a security control, except by POS POI terminals that are verified as not being susceptible to known exploits and the termination points to which they connect as defined in Appendix A2."
+
+ children = [
+ control.acm_certificate_expires_30_days,
+ control.cloudfront_distribution_custom_origins_encryption_in_transit_enabled,
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.cloudfront_distribution_no_deprecated_ssl_protocol,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_application_network_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_tls_https_listeners,
+ control.es_domain_node_to_node_encryption_enabled,
+ control.redshift_cluster_encryption_in_transit_enabled,
+ control.s3_bucket_enforces_ssl
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_4_common_tags, {
+ pci_dss_v321_item_id = "4.1.g"
+ })
+}
diff --git a/pci_dss_v321/requirement_5.sp b/pci_dss_v321/requirement_5.sp
new file mode 100644
index 00000000..07cf89f6
--- /dev/null
+++ b/pci_dss_v321/requirement_5.sp
@@ -0,0 +1,56 @@
+locals {
+ pci_dss_v321_requirement_5_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_5" {
+ title = "Requirement 5: Protect all systems against malware and regularly update anti-virus software or programs"
+ description = "Malicious software, commonly referred to as “malware”—including viruses, worms, and Trojans—enters the network during many business-approved activities including employee e-mail and use of the Internet, mobile computers, and storage devices, resulting in the exploitation of system vulnerabilities. Anti-virus software must be used on all systems commonly affected by malware to protect systems from current and evolving malicious software threats."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_5_1,
+ benchmark.pci_dss_v321_requirement_5_2
+ ]
+
+ tags = local.pci_dss_v321_requirement_5_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_5_1" {
+ title = "5.1 For a sample of system components including all operating system types commonly affected by malicious software, verify that anti-virus software is deployed if applicable anti-virus technology exists"
+ description = "There is a constant stream of attacks using widely published exploits, often called `zero day` (an attack that exploits a previously unknown vulnerability), against otherwise secured systems. Without an anti-virus solution that is updated regularly, these new forms of malicious software can attack systems, disable a network, or lead to compromise of data."
+
+ children = [
+ control.ssm_managed_instance_compliance_association_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_5_common_tags, {
+ pci_dss_v321_item_id = "5.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_5_2" {
+ title = "5.2 Ensure that all anti-virus mechanisms are maintained"
+ description = "Anti-virus mechanisms should be maintained as follows: are kept current, perform periodic scans and generate audit logs which are retained per PCI DSS Requirement 10.7. Even the best anti-virus solutions are limited in effectiveness if they are not maintained and kept current with the latest security updates, signature files, or malware protections. Audit logs provide the ability to monitor virus and malware activity and anti-malware reactions. Thus, it is imperative that anti-malware solutions be configured to generate audit logs and that these logs be managed in accordance with Requirement 10."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_5_2_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_5_common_tags, {
+ pci_dss_v321_item_id = "5.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_5_2_c" {
+ title = "5.2.c Examine a sample of system components, including all operating system types commonly affected by malicious software, to verify that the anti-virus software and definitions are current and periodic scans are performed"
+ description = "Even the best anti-virus solutions are limited in effectiveness if they are not maintained and kept current with the latest security updates, signature files, or malware protections. Audit logs provide the ability to monitor virus and malware activity and anti-malware reactions. Thus, it is imperative that anti-malware solutions be configured to generate audit logs and that these logs be managed in accordance with Requirement 10."
+
+ children = [
+ control.ssm_managed_instance_compliance_association_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_5_common_tags, {
+ pci_dss_v321_item_id = "5.2.c"
+ })
+}
diff --git a/pci_dss_v321/requirement_6.sp b/pci_dss_v321/requirement_6.sp
new file mode 100644
index 00000000..e39af8e4
--- /dev/null
+++ b/pci_dss_v321/requirement_6.sp
@@ -0,0 +1,225 @@
+locals {
+ pci_dss_v321_requirement_6_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "6"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6" {
+ title = "Requirement 6: Develop and maintain secure systems and applications"
+ description = "Unscrupulous individuals use security vulnerabilities to gain privileged access to systems. Many of these vulnerabilities are fixed by vendorprovided security patches, which must be installed by the entities that manage the systems."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_1,
+ benchmark.pci_dss_v321_requirement_6_2,
+ benchmark.pci_dss_v321_requirement_6_3,
+ benchmark.pci_dss_v321_requirement_6_5,
+ benchmark.pci_dss_v321_requirement_6_6
+ ]
+
+ tags = local.pci_dss_v321_requirement_6_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_6_1" {
+ title = "6.1 Establish a process to identify security vulnerabilities, using reputable outside sources for security vulnerability information, and assign a risk ranking (for example, as “high,” “medium,” or “low”) to newly discovered security vulnerabilities"
+ description = "The intent of this requirement is that organizations keep up to date with new vulnerabilities that may impact their environment. Sources for vulnerability information should be trustworthy and often include vendor websites, industry news groups, mailing list, or RSS feeds. Once an organization identifies a vulnerability that could affect their environment, the risk that the vulnerability poses must be evaluated and ranked. The organization must therefore have a method in place to evaluate vulnerabilities on an ongoing basis and assign risk rankings to those vulnerabilities. This is not achieved by an ASV scan or internal vulnerability scan, rather this requires a process to actively monitor industry sources for vulnerability information. Classifying the risks (for example, as “high,” “medium,” or “low”) allows organizations to identify, prioritize, and address the highest risk items more quickly and reduce the likelihood that vulnerabilities posing the greatest risk will be exploited."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_1_b
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_1_b" {
+ title = "6.1.b Interview responsible personnel and observe processes to verify that new security vulnerabilities are identified, a risk ranking is assigned to vulnerabilities that includes identification of all “high risk” and “critical” vulnerabilities and processes to identify new security vulnerabilities include using reputable outside sources for security vulnerability information"
+ description = "The intent of this requirement is that organizations keep up to date with new vulnerabilities that may impact their environment. Sources for vulnerability information should be trustworthy and often include vendor websites, industry news groups, mailing list, or RSS feeds. Once an organization identifies a vulnerability that could affect their environment, the risk that the vulnerability poses must be evaluated and ranked. The organization must therefore have a method in place to evaluate vulnerabilities on an ongoing basis and assign risk rankings to those vulnerabilities. This is not achieved by an ASV scan or internal vulnerability scan, rather this requires a process to actively monitor industry sources for vulnerability information. Classifying the risks (for example, as “high,” “medium,” or “low”) allows organizations to identify, prioritize, and address the highest risk items more quickly and reduce the likelihood that vulnerabilities posing the greatest risk will be exploited."
+
+ children = [
+ control.guardduty_finding_archived
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.1.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_2" {
+ title = "6.2 Ensure that all system components and software are protected from known vulnerabilities by installing applicable vendor- supplied security patches"
+ description = "Install critical security patches within one month of release. Note: Critical security patches should be identified according to the risk ranking process defined in Requirement 6.1. There is a constant stream of attacks using widely published exploits, often called `zero day` (an attack that exploits a previously unknown vulnerability), against otherwise secured systems. If the most recent patches are not implemented on critical systems as soon as possible, a malicious individual can use these exploits to attack or disable a system, or gain access to sensitive data. Prioritizing patches for critical infrastructure ensures that high-priority systems and devices are protected from vulnerabilities as soon as possible after a patch is released. Consider prioritizing patch installations such that security patches for critical or at-risk systems are installed within 30 days, and other lower-risk patches are installed within 2-3 months. This requirement applies to applicable patches for all installed software, including payment applications (both those that are PA-DSS validated and those that are not)."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_2_b,
+ control.ec2_instance_ssm_managed,
+ control.ssm_managed_instance_compliance_patch_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.2"
+ })
+}
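
As an illustration of the kind of check behind control.ec2_instance_ssm_managed, a minimal Steampipe SQL sketch is shown below (not the mod's actual query; it assumes the Steampipe AWS plugin's aws_ec2_instance and aws_ssm_managed_instance tables):

  -- Sketch only: instances not registered with Systems Manager cannot be
  -- patched or inventoried through SSM.
  select
    i.instance_id,
    case
      when m.instance_id is not null then 'ok'
      else 'alarm'
    end as status,
    i.region,
    i.account_id
  from
    aws_ec2_instance as i
    left join aws_ssm_managed_instance as m on m.instance_id = i.instance_id;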
+
+benchmark "pci_dss_v321_requirement_6_2_b" {
+ title = "6.2.b For a sample of system components and related software, compare the list of security patches installed on each system to the most recent vendor security-patch list, to verify that the applicable critical vendor-supplied security patches are installed within one month of release, all applicable vendor-supplied security patches are installed within an appropriate time frame (for example, within three months)"
+ description = "There is a constant stream of attacks using widely published exploits, often called `zero day` (an attack that exploits a previously unknown vulnerability), against otherwise secured systems. If the most recent patches are not implemented on critical systems as soon as possible, a malicious individual can use these exploits to attack or disable a system, or gain access to sensitive data. Prioritizing patches for critical infrastructure ensures that high-priority systems and devices are protected from vulnerabilities as soon as possible after a patch is released. Consider prioritizing patch installations such that security patches for critical or at-risk systems are installed within 30 days, and other lower-risk patches are installed within 2-3 months. This requirement applies to applicable patches for all installed software, including payment applications (both those that are PA-DSS validated and those that are not)."
+
+ children = [
+ control.eks_cluster_with_latest_kubernetes_version,
+ control.rds_db_instance_automatic_minor_version_upgrade_enabled,
+ control.ssm_managed_instance_compliance_patch_compliant
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.2.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3" {
+ title = "6.3 Develop internal and external software applications (including web-based administrative access to applications) securely"
+ description = "Develop internal and external software application securely as follows: in accordance with PCI DSS (for example, secure authentication and logging), based on industry standards and/or best practices, incorporating information security throughout the software-development life cycle. Note: this applies to all software developed internally as well as bespoke or custom software developed by a third party. Without the inclusion of security during the requirements definition, design, analysis, and testing phases of software development, security vulnerabilities can be inadvertently or maliciously introduced into the production environment. Understanding how sensitive data is handled by the application—including when stored, transmitted, and when in memory—can help identify where data needs to be protected."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_3_1,
+ benchmark.pci_dss_v321_requirement_6_3_2,
+ benchmark.pci_dss_v321_requirement_6_3_a,
+ benchmark.pci_dss_v321_requirement_6_3_b,
+ benchmark.pci_dss_v321_requirement_6_3_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3_1" {
+ title = "6.3.1 Examine written software-development procedures and interview responsible personnel to verify that pre-production and/or custom application accounts, user IDs and/or passwords are removed before an application goes into production or is released to customers"
+ description = "Development, test and/or custom application accounts, user IDs, and passwords should be removed from production code before the application becomes active or is released to customers, since these items may give away information about the functioning of the application. Possession of such information could facilitate compromise of the application and related cardholder data."
+
+ children = [
+ control.codebuild_project_environment_privileged_mode_disabled,
+ control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values,
+ control.codebuild_project_source_repo_oauth_configured
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.1"
+ })
+}
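
To make the mapping concrete, a hedged sketch of the sort of check control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values performs is shown below. It is not the mod's actual query; the aws_codebuild_project table exists in the Steampipe AWS plugin, but the JSON field names used here ('EnvironmentVariables', 'Name', 'Type') are assumptions about its environment column:

  -- Sketch only: flag CodeBuild environment variables that appear to hold
  -- AWS credentials in plaintext.
  select
    p.name as project,
    env_var ->> 'Name' as variable_name,
    case
      when env_var ->> 'Type' = 'PLAINTEXT'
        and upper(env_var ->> 'Name') in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')
        then 'alarm'
      else 'ok'
    end as status
  from
    aws_codebuild_project as p,
    jsonb_array_elements(p.environment -> 'EnvironmentVariables') as env_var;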
+
+benchmark "pci_dss_v321_requirement_6_3_2" {
+ title = "6.3.2 Review custom code prior to release to production or customers in order to identify any potential coding vulnerability (using either manual or automated processes)"
+ description = "Custom code prior to release to production should include the following: code changes are reviewed by individuals other than the originating code author, and by individuals knowledgeable about code-review techniques and secure coding practices, code reviews ensure code is developed according to secure coding guidelines, appropriate corrections are implemented prior to release, code-review results are reviewed and approved by management prior to release and security vulnerabilities in custom code are commonly exploited by malicious individuals to gain access to a network and compromise cardholder data. An individual knowledgeable and experienced in code-review techniques should be involved in the review process. Code reviews should be performed by someone other than the developer of the code to allow for an independent, objective review. Automated tools or processes may also be used in lieu of manual reviews, but keep in mind that it may be difficult or even impossible for an automated tool to identify some coding issues. Correcting coding errors before the code is deployed into a production environment or released to customers prevents the code exposing the environments to potential exploit. Faulty code is also far more difficult and expensive to address after it has been deployed or released into production environments. Including a formal review and signoff by management prior to release helps to ensure that code is approved and has been developed in accordance with policies and procedures."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_3_2_b
+ ]
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3_2_b" {
+ title = "6.3.2.b Select a sample of recent custom application changes and verify that custom application code is reviewed according to 6.3.2.a, above"
+ description = "Security vulnerabilities in custom code are commonly exploited by malicious individuals to gain access to a network and compromise cardholder data. An individual knowledgeable and experienced in code-review techniques should be involved in the review process. Code reviews should be performed by someone other than the developer of the code to allow for an independent, objective review. Automated tools or processes may also be used in lieu of manual reviews, but keep in mind that it may be difficult or even impossible for an automated tool to identify some coding issues. Correcting coding errors before the code is deployed into a production environment or released to customers prevents the code exposing the environments to potential exploit. Faulty code is also far more difficult and expensive to address after it has been deployed or released into production environments. Including a formal review and signoff by management prior to release helps to ensure that code is approved and has been developed in accordance with policies and procedures."
+
+ children = [
+ control.codedeploy_deployment_group_lambda_allatonce_traffic_shift_disabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.2.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3_a" {
+ title = "6.3.a Examine written software-development processes to verify that the processes are based on industry standards and/or best practices"
+ description = "Without the inclusion of security during the requirements definition, design, analysis, and testing phases of software development, security vulnerabilities can be inadvertently or maliciously introduced into the production environment. Understanding how sensitive data is handled by the application—including when stored, transmitted, and when in memory—can help identify where data needs to be protected."
+
+ children = [
+ control.codebuild_project_environment_privileged_mode_disabled,
+ control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values,
+ control.codebuild_project_source_repo_oauth_configured
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3_b" {
+ title = "6.3.b Examine written software-development processes to verify that information security is included throughout the life cycle"
+ description = "Without the inclusion of security during the requirements definition, design, analysis, and testing phases of software development, security vulnerabilities can be inadvertently or maliciously introduced into the production environment. Understanding how sensitive data is handled by the application—including when stored, transmitted, and when in memory—can help identify where data needs to be protected."
+
+ children = [
+ control.codebuild_project_environment_privileged_mode_disabled,
+ control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values,
+ control.codebuild_project_source_repo_oauth_configured
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_3_c" {
+ title = "6.3.c Examine written software-development processes to verify that software applications are developed in accordance with PCI DSS"
+ description = "Without the inclusion of security during the requirements definition, design, analysis, and testing phases of software development, security vulnerabilities can be inadvertently or maliciously introduced into the production environment. Understanding how sensitive data is handled by the application—including when stored, transmitted, and when in memory—can help identify where data needs to be protected."
+
+ children = [
+ control.codebuild_project_environment_privileged_mode_disabled,
+ control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values,
+ control.codebuild_project_source_repo_oauth_configured
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.3.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_5" {
+ title = "6.5 Address common coding vulnerabilities in software-development processes like train developers at least annually in up-to-date secure coding techniques etc"
+ description = "Common coding vulnerabilities in software-development processes as follows: train developers at least annually in up- to-date secure coding techniques, including how to avoid common coding vulnerabilities, develop applications based on secure coding guidelines. Note: The vulnerabilities listed at 6.5.1 through 6.5.10 were current with industry best practices when this version of PCI DSS was published. However, as industry best practices for vulnerability management are updated (for example, the OWASP Guide, SANS CWE Top 25, CERT Secure Coding, etc.), the current best practices must be used for these requirements. The application layer is high-risk and may be targeted by both internal and external threats. Requirements 6.5.1 through 6.5.10 are the minimum controls that should be in place, and organizations should incorporate the relevant secure coding practices as applicable to the particular technology in their environment. Application developers should be properly trained to identify and resolve issues related to these (and other) common coding vulnerabilities. Having staff knowledgeable of secure coding guidelines should minimize the number of security vulnerabilities introduced through poor coding practices. Training for developers may be provided in-house or by third parties and should be applicable for technology used. As industry-accepted secure coding practices change, organizational coding practices and developer training should likewise be updated to address new threats—for example, memory scraping attacks. The vulnerabilities identified in 6.5.1 through 6.5.10 provide a minimum baseline. It is up to the organization to remain up to date with vulnerability trends and incorporate appropriate measures into their secure coding practices."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_6_5_8
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_5_8" {
+ title = "6.5.8 Examine software-development policies and procedures and interview responsible personnel to verify that improper access control—such as insecure direct object references, failure to restrict URL access, and directory traversal—is addressed by coding technique"
+ description = "Directory traversal—is addressed by coding technique should include: proper authentication of users, sanitizing input, not exposing internal object references to users and user interfaces that do not permit access to unauthorized functions. A direct object reference occurs when a developer exposes a reference to an internal implementation object, such as a file, directory, database record, or key, as a URL or form parameter. Attackers can manipulate those references to access other objects without authorization. Consistently enforce access control in presentation layer and business logic for all URLs. Frequently, the only way an application protects sensitive functionality is by preventing the display of links or URLs to unauthorized users. Attackers can use this weakness to access and perform unauthorized operations by accessing those URLs directly. An attacker may be able to enumerate and navigate the directory structure of a website (directory traversal) thus gaining access to unauthorized information as well as gaining further insight into the workings of the site for later exploitation. If user interfaces permit access to unauthorized functions, this access could result in unauthorized individuals gaining access to privileged credentials or cardholder data. Only authorized users should be permitted to access direct object references to sensitive resources. Limiting access to data resources will help prevent cardholder data from being presented to unauthorized resources."
+
+ children = [
+ control.codebuild_project_environment_privileged_mode_disabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.5.8"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_6_6" {
+ title = "6.6 For public-facing web applications, ensure that either one of the requirements are in place"
+ description = "Verify following methods is in place as follows: examine documented processes, interview personnel, and examine records of application security assessments to verify that public-facing web applications are reviewed—using either manual or automated vulnerability security assessment tools or methods—as follows: - At least annually - After any changes - By an organization that specializes in application security - That, at a minimum, all vulnerabilities in Requirement 6.5 are included in the assessment - That all vulnerabilities are corrected - That the application is re-evaluated after the corrections. Examine the system configuration settings and interview responsible personnel to verify that an automated technical solution that detects and prevents web-based attacks (for example, a web-application firewall) is in place as follows: - Is situated in front of public-facing web applications to detect and prevent web-based attacks. - Is actively running and up to date as applicable. - Is generating audit logs. - Is configured to either block web-based attacks, or generate an alert that is immediately investigated.Public-facing web applications are primary targets for attackers, and poorly coded web applications provide an easy path for attackers to gain access to sensitive data and systems. The requirement for reviewing applications or installing web-application firewalls is intended to reduce the number of compromises on public-facing web applications due to poor coding or application management practices. Manual or automated vulnerability security assessment tools or methods review and/or test the application for vulnerabilities, web-application firewalls filter and block non- essential traffic at the application layer. Used in conjunction with a network-based firewall, a properly configured web-application firewall prevents application-layer attacks if applications are improperly coded or configured. This can be achieved through a combination of technology and process. Process-based solutions must have mechanisms that facilitate timely responses to alerts in order to meet the intent of this requirement, which is to prevent attacks. Note: “An organization that specializes in application security” can be either a third-party company or an internal organization, as long as the reviewers specialize in application security and can demonstrate independence from the development team. For public-facing web applications, address new threats and vulnerabilities on an ongoing basis and ensure these applications are protected against known attacks by either of the following methods: reviewing public-facing web applications via manual or automated application vulnerability security assessment tools or methods, at least annually and after any changes Note: This assessment is not the same as the vulnerability scans performed for Requirement 11.2., Installing an automated technical solution that detects and prevents web- based attacks (for example, a web- application firewall) in front of public- facing web applications, to continually check all traffic."
+
+ children = [
+ control.apigateway_stage_use_waf_web_acl,
+ control.elb_application_lb_desync_mitigation_mode,
+ control.elb_application_lb_waf_enabled,
+ control.elb_classic_lb_desync_mitigation_mode,
+ control.waf_regional_rule_condition_attached,
+ control.waf_rule_condition_attached,
+ control.waf_rule_group_rule_attached,
+ control.waf_web_acl_resource_associated,
+ control.waf_web_acl_rule_attached
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_6_common_tags, {
+ pci_dss_v321_item_id = "6.6"
+ })
+}
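
For a sense of how the WAF-related children above are evaluated, a minimal Steampipe SQL sketch in the spirit of control.apigateway_stage_use_waf_web_acl follows (not the mod's actual query; it assumes the Steampipe aws_api_gateway_stage table exposes a web_acl_arn column):

  -- Sketch only: an API Gateway stage without an associated web ACL has no
  -- WAF protection in front of it.
  select
    name,
    rest_api_id,
    case
      when web_acl_arn is not null then 'ok'
      else 'alarm'
    end as status,
    region,
    account_id
  from
    aws_api_gateway_stage;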
diff --git a/pci_dss_v321/requirement_7.sp b/pci_dss_v321/requirement_7.sp
new file mode 100644
index 00000000..bd83b408
--- /dev/null
+++ b/pci_dss_v321/requirement_7.sp
@@ -0,0 +1,100 @@
+locals {
+ pci_dss_v321_requirement_7_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "7"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_7" {
+ title = "Requirement 7: Restrict access to cardholder data by business need to know"
+ description = "To ensure critical data can only be accessed by authorized personnel, systems and processes must be in place to limit access based on need to know and according to job responsibilities."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_7_1,
+ benchmark.pci_dss_v321_requirement_7_2
+ ]
+
+ tags = local.pci_dss_v321_requirement_7_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_7_1" {
+ title = "7.1 Limit access to system components and cardholder data to only those individuals whose job requires such access"
+ description = "The more people who have access to cardholder data, the more risk there is that a user's account will be used maliciously. Limiting access to those with a legitimate business reason for the access helps an organization prevent mishandling of cardholder data through inexperience or malice."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_7_1_2
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_7_common_tags, {
+ pci_dss_v321_item_id = "7.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_7_1_2" {
+ title = "7.1.2 Restrict access to privileged user IDs to least privileges necessary to perform job responsibilities"
+ description = "When assigning privileged IDs, it is important to assign individuals only the privileges they need to perform their job (the “least privileges”). For example, the database administrator or backup administrator should not be assigned the same privileges as the overall systems administrator."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_7_1_2_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_7_common_tags, {
+ pci_dss_v321_item_id = "7.1.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_7_1_2_a" {
+ title = "7.1.2.a Interview personnel responsible for assigning access to verify that access to privileged user IDs is assigned only to roles that specifically require such privileged access and restricted to least privileges necessary to perform job responsibilities"
+ description = "When assigning privileged IDs, it is important to assign individuals only the privileges they need to perform their job (the “least privileges”). For example, the database administrator or backup administrator should not be assigned the same privileges as the overall systems administrator."
+
+ children = [
+ control.efs_access_point_enforce_user_identity,
+ control.iam_all_policy_no_service_wild_card,
+ control.opensearch_domain_fine_grained_access_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_7_common_tags, {
+ pci_dss_v321_item_id = "7.1.2.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_7_2" {
+ title = "7.2 Examine system settings and vendor documentation to verify that an access control system(s)"
+ description = "Without a mechanism to restrict access based on user's need to know, a user may unknowingly be granted access to cardholder data. Access control systems automate the process of restricting access and assigning privileges. Additionally, a default “deny-all” setting ensures no one is granted access until and unless a rule is established specifically granting such access. Entities may have one or more access controls systems to manage user access. Note: Some access control systems are set by default to “allow-all,” thereby permitting access unless/until a rule is written to specifically deny it."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_7_2_1
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_7_common_tags, {
+ pci_dss_v321_item_id = "7.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_7_2_1" {
+ title = "7.2.1 Confirm that access control systems are in place on all system components"
+ description = "Without a mechanism to restrict access based on user's need to know, a user may unknowingly be granted access to cardholder data. Access control systems automate the process of restricting access and assigning privileges. Additionally, a default “deny-all” setting ensures no one is granted access until and unless a rule is established specifically granting such access. Entities may have one or more access controls systems to manage user access. Note: Some access control systems are set by default to “allow-all,” thereby permitting access unless/until a rule is written to specifically deny it."
+ children = [
+ control.dms_replication_instance_not_publicly_accessible,
+ control.ebs_snapshot_not_publicly_restorable,
+ control.ec2_instance_not_publicly_accessible,
+ control.eks_cluster_endpoint_restrict_public_access,
+ control.emr_cluster_master_nodes_no_public_ip,
+ control.es_domain_in_vpc,
+ control.iam_policy_no_star_star,
+ control.iam_root_user_no_access_keys,
+ control.iam_user_no_inline_attached_policies,
+ control.lambda_function_restrict_public_access,
+ control.rds_db_instance_prohibit_public_access,
+ control.rds_db_snapshot_prohibit_public_access,
+ control.redshift_cluster_prohibit_public_access,
+ control.s3_bucket_restrict_public_read_access,
+ control.s3_bucket_restrict_public_write_access,
+ control.s3_public_access_block_bucket_account,
+ control.sagemaker_notebook_instance_direct_internet_access_disabled,
+ control.vpc_igw_attached_to_authorized_vpc
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_7_common_tags, {
+ pci_dss_v321_item_id = "7.2.1"
+ })
+}
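
Most of the children above reduce to a single boolean or policy flag per resource. As one hedged example (not the mod's actual query), a check in the spirit of control.rds_db_instance_prohibit_public_access could be sketched in Steampipe SQL as:

  -- Sketch only: publicly accessible RDS instances bypass the default
  -- deny-all posture this requirement expects.
  select
    db_instance_identifier,
    case
      when publicly_accessible then 'alarm'
      else 'ok'
    end as status,
    region,
    account_id
  from
    aws_rds_db_instance;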
diff --git a/pci_dss_v321/requirement_8.sp b/pci_dss_v321/requirement_8.sp
new file mode 100644
index 00000000..f4370af5
--- /dev/null
+++ b/pci_dss_v321/requirement_8.sp
@@ -0,0 +1,514 @@
+locals {
+ pci_dss_v321_requirement_8_common_tags = merge(local.pci_dss_v321_common_tags, {
+ control_set = "8"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8" {
+ title = "Requirement 8: Identify and authenticate access to system components"
+ description = "Assigning a unique identification (ID) to each person with access ensures that each individual is uniquely accountable for their actions. When such accountability is in place, actions taken on critical data and systems are performed by, and can be traced to, known and authorized users and processes."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_1,
+ benchmark.pci_dss_v321_requirement_8_2,
+ benchmark.pci_dss_v321_requirement_8_3,
+ benchmark.pci_dss_v321_requirement_8_5,
+ benchmark.pci_dss_v321_requirement_8_6,
+ benchmark.pci_dss_v321_requirement_8_7
+ ]
+
+ tags = local.pci_dss_v321_requirement_8_common_tags
+}
+
+benchmark "pci_dss_v321_requirement_8_1" {
+ title = "8.1 Define and implement policies and procedures to ensure proper user identification management for non-consumer users and administrators"
+ description = "By ensuring each user is uniquely identified— instead of using one ID for several employees—an organization can maintain individual responsibility for actions and an effective audit trail per employee. This will help speed issue resolution and containment when misuse or malicious intent occurs."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_1_2,
+ benchmark.pci_dss_v321_requirement_8_1_4,
+ benchmark.pci_dss_v321_requirement_8_1_5
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_1_2" {
+ title = "8.1.2 Control addition, deletion, and modification of user IDs, credentials, and other identifier objects"
+ description = "To ensure that user accounts granted access to systems are all valid and recognized users, strong processes must manage all changes to user IDs and other authentication credentials, including adding new ones and modifying or deleting existing ones."
+
+ children = [
+ control.log_metric_filter_iam_policy
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.1.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_1_4" {
+ title = "8.1.4 Observe user accounts to verify that any inactive accounts over 90 days old are either removed or disabled"
+ description = "Accounts that are not used regularly are often targets of attack since it is less likely that any changes (such as a changed password) will be noticed. As such, these accounts may be more easily exploited and used to access cardholder data."
+
+ children = [
+ control.iam_account_password_policy_strong_min_reuse_24,
+ control.iam_user_unused_credentials_90
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.1.4"
+ })
+}
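
A hedged sketch of the kind of logic behind control.iam_user_unused_credentials_90 is shown below (not the mod's actual query, and deliberately simplified to ignore credentials that have never been used); it assumes the Steampipe aws_iam_credential_report table:

  -- Sketch only: flag users whose password or access keys have gone unused
  -- for more than 90 days.
  select
    user_name,
    case
      when password_enabled and password_last_used < now() - interval '90 days' then 'alarm'
      when access_key_1_active and access_key_1_last_used_date < now() - interval '90 days' then 'alarm'
      when access_key_2_active and access_key_2_last_used_date < now() - interval '90 days' then 'alarm'
      else 'ok'
    end as status
  from
    aws_iam_credential_report;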
+
+benchmark "pci_dss_v321_requirement_8_1_5" {
+ title = "8.1.5 Manage IDs used by third parties to access, support, or maintain system components via remote access by enabling only during the time period needed and disabled when not in use"
+ description = "Allowing vendors to have 24/7 access into your network in case they need to support your systems increases the chances of unauthorized access, either from a user in the vendor's environment or from a malicious individual who finds and uses this always-available external entry point into your network. Enabling access only for the time periods needed, and disabling it as soon as it is no longer needed, helps prevent misuse of these connections. Monitoring of vendor access provides assurance that vendors are accessing only the systems necessary and only during approved time frames."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_1_5_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.1.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_1_5_a" {
+ title = "8.1.5.a Interview personnel and observe processes for managing accounts used by third parties to access, support, or maintain system components to verify that accounts used for remote access are disabled when not in use, enabled only when needed by the third party and disabled when not in use"
+ description = "Allowing vendors to have 24/7 access into your network in case they need to support your systems increases the chances of unauthorized access, either from a user in the vendor's environment or from a malicious individual who finds and uses this always-available external entry point into your network. Enabling access only for the time periods needed, and disabling it as soon as it is no longer needed, helps prevent misuse of these connections. Monitoring of vendor access provides assurance that vendors are accessing only the systems necessary and only during approved time frames."
+
+ children = [
+ control.cloudtrail_multi_region_trail_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.1.5.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2" {
+ title = "8.2 To verify that users are authenticated using unique ID and additional authentication (for example, a password/phrase) for access to the cardholder data environment perform the methods like examine documentation describing the authentication method(s) used etc"
+ description = "Verify users are authenticated using unique ID, perform the following: examine documentation describing the authentication method(s) used, for each type of authentication method used and for each type of system component, observe an authentication to verify authentication is functioning consistent with documented authentication method(s). These authentication methods, when used in addition to unique IDs, help protect users' IDs from being compromised, since the one attempting the compromise needs to know both the unique ID and the password (or other authentication used). Note that a digital certificate is a valid option for “something you have” as long as it is unique for a particular user. Since one of the first steps a malicious individual will take to compromise a system is to exploit weak or nonexistent passwords, it is important to implement good processes for authentication management."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_2_1,
+ benchmark.pci_dss_v321_requirement_8_2_3,
+ benchmark.pci_dss_v321_requirement_8_2_4,
+ benchmark.pci_dss_v321_requirement_8_2_5,
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_1" {
+ title = "8.2.1 Using strong cryptography, render all authentication credentials (such as passwords/phrases) unreadable during transmission and storage on all system components"
+ description = "Many network devices and applications transmit unencrypted, readable passwords across the network and/or store passwords without encryption. A malicious individual can easily intercept unencrypted passwords during transmission using a “sniffer,” or directly access unencrypted passwords in files where they are stored, and use this data to gain unauthorized access. Note: Testing Procedures 8.2.1.d and 8.2.1.e are additional procedures that only apply if the entity being assessed is a service provider."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_2_1_a,
+ benchmark.pci_dss_v321_requirement_8_2_1_b,
+ benchmark.pci_dss_v321_requirement_8_2_1_c,
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.codebuild_project_plaintext_env_variables_no_sensitive_aws_values,
+ control.codebuild_project_source_repo_oauth_configured,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_tls_https_listeners,
+ control.es_domain_node_to_node_encryption_enabled,
+ control.redshift_cluster_encryption_in_transit_enabled,
+ control.s3_bucket_enforces_ssl
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_1_a" {
+ title = "8.2.1.a Examine vendor documentation and system configuration settings to verify that passwords are protected with strong cryptography during transmission and storage"
+ description = "Many network devices and applications transmit unencrypted, readable passwords across the network and/or store passwords without encryption. A malicious individual can easily intercept unencrypted passwords during transmission using a “sniffer,” or directly access unencrypted passwords in files where they are stored, and use this data to gain unauthorized access. Note: Testing Procedures 8.2.1.d and 8.2.1.e are additional procedures that only apply if the entity being assessed is a service provider."
+
+ children = [
+ control.cloudfront_distribution_encryption_in_transit_enabled,
+ control.cloudtrail_trail_logs_encrypted_with_kms_cmk,
+ control.dax_cluster_encryption_at_rest_enabled,
+ control.dynamodb_table_encrypted_with_kms,
+ control.dynamodb_table_encryption_enabled,
+ control.ebs_attached_volume_encryption_enabled,
+ control.ec2_ebs_default_encryption_enabled,
+ control.efs_file_system_encrypted_with_cmk,
+ control.eks_cluster_secrets_encrypted,
+ control.elb_application_lb_drop_http_headers,
+ control.elb_application_lb_redirect_http_request_to_https,
+ control.elb_classic_lb_use_ssl_certificate,
+ control.elb_classic_lb_use_tls_https_listeners,
+ control.es_domain_encryption_at_rest_enabled,
+ control.es_domain_node_to_node_encryption_enabled,
+ control.log_group_encryption_at_rest_enabled,
+ control.rds_db_instance_encryption_at_rest_enabled,
+ control.rds_db_snapshot_encrypted_at_rest,
+ control.redshift_cluster_encryption_in_transit_enabled,
+ control.redshift_cluster_encryption_logging_enabled,
+ control.s3_bucket_default_encryption_enabled_kms,
+ control.s3_bucket_default_encryption_enabled,
+ control.s3_bucket_enforces_ssl,
+ control.sagemaker_endpoint_configuration_encryption_at_rest_enabled,
+ control.sagemaker_notebook_instance_encryption_at_rest_enabled,
+ control.sns_topic_encrypted_at_rest
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_1_b" {
+ title = "8.2.1.b For a sample of system components, examine password files to verify that passwords are unreadable during storage"
+ description = "Many network devices and applications transmit unencrypted, readable passwords across the network and/or store passwords without encryption. A malicious individual can easily intercept unencrypted passwords during transmission using a “sniffer,” or directly access unencrypted passwords in files where they are stored, and use this data to gain unauthorized access. Note: Testing Procedures 8.2.1.d and 8.2.1.e are additional procedures that only apply if the entity being assessed is a service provider."
+
+ children = [
+ control.secretsmanager_secret_encrypted_with_kms_cmk
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.1.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_1_c" {
+ title = "8.2.1.c For a sample of system components, examine data transmissions to verify that passwords are unreadable during transmission"
+ description = "Many network devices and applications transmit unencrypted, readable passwords across the network and/or store passwords without encryption. A malicious individual can easily intercept unencrypted passwords during transmission using a “sniffer,” or directly access unencrypted passwords in files where they are stored, and use this data to gain unauthorized access. Note: Testing Procedures 8.2.1.d and 8.2.1.e are additional procedures that only apply if the entity being assessed is a service provider."
+
+ children = [
+ control.secretsmanager_secret_encrypted_with_kms_cmk
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.1.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_3" {
+ title = "8.2.3 Passwords/passphrases require a minimum length of at least seven characters, contain both numeric and alphabetic characters"
+ description = "The passwords/ passphrases must have complexity and strength at least equivalent to the parameters specified above. Strong passwords/passphrases are the first line of defense into a network since a malicious individual will often first try to find accounts with weak or non- existent passwords. If passwords are short or simple to guess, it is relatively easy for a malicious individual to find these weak accounts and compromise a network under the guise of a valid user ID. This requirement specifies that a minimum of seven characters and both numeric and alphabetic characters should be used for passwords/ passphrases. For cases where this minimum cannot be met due to technical limitations, entities can use “equivalent strength” to evaluate their alternative. For information on variability and equivalency of password strength (also referred to as entropy) for passwords/passphrases of different formats, refer to industry standards (e.g., the current version of NIST SP 800-63.) Note: Testing Procedure 8.2.3.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_2_3_a,
+ benchmark.pci_dss_v321_requirement_8_2_3_b,
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.3"
+ })
+}
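
The 8.2.3 family maps to the account password policy. A minimal Steampipe SQL sketch of the length and character-class portion of control.iam_account_password_policy_strong (not the mod's actual query, which checks additional parameters) might look like:

  -- Sketch only: PCI DSS 8.2.3 asks for at least seven characters and both
  -- alphabetic and numeric characters.
  select
    account_id,
    case
      when minimum_password_length >= 7
        and require_numbers
        and (require_lowercase_characters or require_uppercase_characters)
        then 'ok'
      else 'alarm'
    end as status
  from
    aws_iam_account_password_policy;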
+
+benchmark "pci_dss_v321_requirement_8_2_3_a" {
+ title = "8.2.3.a For a sample of system components, inspect system configuration settings to verify that user password/passphrase parameters are set to require at least the following strength/complexity that is require a minimum length of at least seven characters and contain both numeric and alphabetic characters"
+ description = "Strong passwords/passphrases are the first line of defense into a network since a malicious individual will often first try to find accounts with weak or non- existent passwords. If passwords are short or simple to guess, it is relatively easy for a malicious individual to find these weak accounts and compromise a network under the guise of a valid user ID. This requirement specifies that a minimum of seven characters and both numeric and alphabetic characters should be used for passwords/ passphrases. For cases where this minimum cannot be met due to technical limitations, entities can use “equivalent strength” to evaluate their alternative. For information on variability and equivalency of password strength (also referred to as entropy) for passwords/passphrases of different formats, refer to industry standards (e.g., the current version of NIST SP 800-63.) Note: Testing Procedure 8.2.3.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.3.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_3_b" {
+ title = "8.2.3.b Additional testing procedure for service provider assessments only to review internal processes and customer/user documentation to verify that non-consumer customer passwords/passphrases are required to meet at least the following strength/complexity that is require a minimum length of at least seven characters and contain both numeric and alphabetic characters"
+ description = "Strong passwords/passphrases are the first line of defense into a network since a malicious individual will often first try to find accounts with weak or non- existent passwords. If passwords are short or simple to guess, it is relatively easy for a malicious individual to find these weak accounts and compromise a network under the guise of a valid user ID. This requirement specifies that a minimum of seven characters and both numeric and alphabetic characters should be used for passwords/ passphrases. For cases where this minimum cannot be met due to technical limitations, entities can use “equivalent strength” to evaluate their alternative. For information on variability and equivalency of password strength (also referred to as entropy) for passwords/passphrases of different formats, refer to industry standards (e.g., the current version of NIST SP 800-63.) Note: Testing Procedure 8.2.3.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.3.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_4" {
+ title = "8.2.4 Change user passwords/passphrases at least once every 90 days"
+ description = "Passwords/passphrases that are valid for a long time without a change provide malicious individuals with more time to work on breaking the password/phrase. Note: Testing Procedure 8.2.4.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_2_4_a,
+ benchmark.pci_dss_v321_requirement_8_2_4_b,
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.4"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_4_a" {
+ title = "8.2.4.a For a sample of system components, inspect system configuration settings to verify that user password/passphrase parameters are set to require users to change passwords at least once every 90 days"
+ description = "Passwords/passphrases that are valid for a long time without a change provide malicious individuals with more time to work on breaking the password/phrase. Note: Testing Procedure 8.2.4.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong,
+ control.secretsmanager_secret_last_changed_90_day
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.4.a"
+ })
+}
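
A hedged sketch of the rotation check behind control.secretsmanager_secret_last_changed_90_day follows (not the mod's actual query; it assumes the Steampipe aws_secretsmanager_secret table and its last_changed_date column):

  -- Sketch only: secrets whose value has not changed in 90 days fail the
  -- 90-day change expectation of 8.2.4.
  select
    name,
    last_changed_date,
    case
      when last_changed_date >= now() - interval '90 days' then 'ok'
      else 'alarm'
    end as status,
    region,
    account_id
  from
    aws_secretsmanager_secret;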
+
+benchmark "pci_dss_v321_requirement_8_2_4_b" {
+ title = "8.2.4.b Additional testing procedure for service provider assessments only: Review internal processes and customer/user documentation to verify that non-consumer customer user passwords/passphrases are required to change periodically; and non-consumer customer users are given guidance as to when, and under what circumstances, passwords/passphrases must change"
+ description = "Passwords/passphrases that are valid for a long time without a change provide malicious individuals with more time to work on breaking the password/phrase. Note: Testing Procedure 8.2.4.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.4.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_5" {
+ title = "8.2.5 Do not allow an individual to submit a new password/passphrase that is the same as any of the last four passwords/passphrases he or she has used"
+ description = "If password history isn't maintained, the effectiveness of changing passwords is reduced, as previous passwords can be reused over and over. Requiring that passwords cannot be reused for a period of time reduces the likelihood that passwords that have been guessed or brute-forced will be used in the future. Note: Testing Procedure 8.2.5.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_2_5_a,
+ benchmark.pci_dss_v321_requirement_8_2_5_b,
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_5_a" {
+ title = "8.2.5.a For a sample of system components, obtain and inspect system configuration settings to verify that password parameters are set to require that new passwords/passphrases cannot be the same as the four previously used passwords/passphrases"
+ description = "If password history isn't maintained, the effectiveness of changing passwords is reduced, as previous passwords can be reused over and over. Requiring that passwords cannot be reused for a period of time reduces the likelihood that passwords that have been guessed or brute-forced will be used in the future. Note: Testing Procedure 8.2.5.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.5.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_2_5_b" {
+ title = "8.2.5.b Additional testing procedure for service provider assessments only to review internal processes and customer/user documentation to verify that new non-consumer customer user passwords/passphrase cannot be the same as the previous four passwords"
+ description = "If password history isn't maintained, the effectiveness of changing passwords is reduced, as previous passwords can be reused over and over. Requiring that passwords cannot be reused for a period of time reduces the likelihood that passwords that have been guessed or brute-forced will be used in the future. Note: Testing Procedure 8.2.5.b is an additional procedure that only applies if the entity being assessed is a service provider."
+
+ children = [
+ control.iam_account_password_policy_strong
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.2.5.b"
+ })
+}
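
Password history also maps to the account password policy. A minimal sketch against the PCI minimum of four remembered passwords is shown below (not the mod's actual query; control.iam_account_password_policy_strong enforces a stricter reuse value):

  -- Sketch only: password_reuse_prevention is null when no reuse history is
  -- configured, which also falls through to 'alarm'.
  select
    account_id,
    password_reuse_prevention,
    case
      when password_reuse_prevention >= 4 then 'ok'
      else 'alarm'
    end as status
  from
    aws_iam_account_password_policy;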
+
+benchmark "pci_dss_v321_requirement_8_3" {
+ title = "8.3 Secure all individual non-console administrative access and all remote access to the CDE using multi-factor authentication"
+ description = "Note: Multi-factor authentication requires that a minimum of two of the three authentication methods (see Requirement 8.2 for descriptions of authentication methods) be used for authentication. Using one factor twice (for example, using two separate passwords) is not considered multi-factor authentication. Multi-factor authentication requires an individual to present a minimum of two separate forms of authentication (as described in Requirement 8.2), before access is granted. Multi-factor authentication provides additional assurance that the individual attempting to gain access is who they claim to be. With multi-factor authentication, an attacker would need to compromise at least two different authentication mechanisms, increasing the difficulty of compromise and thus reducing the risk. Multi-factor authentication is not required at both the system-level and application-level for a particular system component. Multi-factor authentication can be performed either upon authentication to the particular network or to the system component. Examples of multi-factor technologies include but are not limited to remote authentication and dial-in service (RADIUS) with tokens; terminal access controller access control system (TACACS) with tokens; and other technologies that facilitate multi- factor authentication."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_3_1,
+ benchmark.pci_dss_v321_requirement_8_3_2
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.3"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_3_1" {
+ title = "8.3.1 Incorporate multi-factor authentication for all non-console access into the CDE for personnel with administrative access"
+ description = "This requirement is intended to apply to all personnel with administrative access to the CDE. This requirement applies only to personnel with administrative access and only for non-console access to the CDE; it does not apply to application or system accounts performing automated functions. If the entity does not use segmentation to separate the CDE from the rest of their network, an administrator could use multi-factor authentication either when logging onto the CDE network or when logging onto a system. If the CDE is segmented from the rest of the entity’s network, an administrator would need to use multi- factor authentication when connecting to a CDE system from a non-CDE network. Multi-factor authentication can be implemented at network level or at system/application level; it does not have to be both. If the administrator uses MFA when logging into the CDE network, they do not also need to use MFA to log into a particular system or application within the CDE."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_3_1_a,
+ control.iam_root_user_hardware_mfa_enabled,
+ control.iam_user_console_access_mfa_enabled,
+ control.iam_user_mfa_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.3.1"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_3_1_a" {
+ title = "8.3.1.a Examine network and/or system configurations, as applicable, to verify multi-factor authentication is required for all non-console administrative access into the CDE"
+ description = "This requirement is intended to apply to all personnel with administrative access to the CDE. This requirement applies only to personnel with administrative access and only for non-console access to the CDE; it does not apply to application or system accounts performing automated functions. If the entity does not use segmentation to separate the CDE from the rest of their network, an administrator could use multi-factor authentication either when logging onto the CDE network or when logging onto a system. If the CDE is segmented from the rest of the entity's network, an administrator would need to use multi- factor authentication when connecting to a CDE system from a non-CDE network. Multi-factor authentication can be implemented at network level or at system/application level; it does not have to be both. If the administrator uses MFA when logging into the CDE network, they do not also need to use MFA to log into a particular system or application within the CDE."
+
+ children = [
+ control.iam_root_user_hardware_mfa_enabled,
+ control.iam_user_console_access_mfa_enabled,
+ control.iam_user_mfa_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.3.1.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_3_2" {
+ title = "8.3.2 Incorporate multi-factor authentication for all remote network access (both user and administrator, and including third-party access for support or maintenance) originating from outside the entity's network"
+ description = "This requirement is intended to apply to all personnel—including general users, administrators, and vendors (for support or maintenance) with remote access to the network—where that remote access could lead to access to the CDE. If remote access is to an entity's network that has appropriate segmentation, such that remote users cannot access or impact the cardholder data environment, multi-factor authentication for remote access to that network would not be required. However, multi- factor authentication is required for any remote access to networks with access to the cardholder data environment, and is recommended for all remote access to the entity's networks."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_3_2_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.3.2"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_3_2_a" {
+ title = "8.3.2.a Examine system configurations for remote access servers and systems to verify multi-factor authentication is required for all remote access by personnel, both user and administrator, and all third-party/vendor remote access (including access to applications and system components for support or maintenance purposes)"
+ description = "This requirement is intended to apply to all personnel—including general users, administrators, and vendors (for support or maintenance) with remote access to the network—where that remote access could lead to access to the CDE. If remote access is to an entity's network that has appropriate segmentation, such that remote users cannot access or impact the cardholder data environment, multi-factor authentication for remote access to that network would not be required. However, multi- factor authentication is required for any remote access to networks with access to the cardholder data environment, and is recommended for all remote access to the entity's networks."
+
+ children = [
+ control.iam_root_user_hardware_mfa_enabled,
+ control.iam_user_console_access_mfa_enabled,
+ control.iam_user_mfa_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.3.2.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_5" {
+ title = "8.5 Do not use group, shared, or generic IDs, passwords, or other authentication methods"
+ description = "Generic user IDs are disabled or removed. Shared user IDs do not exist for system administration and other critical functions. Shared and generic user IDs are not used to administer any system components. If multiple users share the same authentication credentials (for example, user account and password), it becomes impossible to trace system access and activities to an individual. This in turn prevents an entity from assigning accountability for, or having effective logging of, an individual's actions, since a given action could have been performed by anyone in the group that has knowledge of the authentication credentials."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_5_a
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.5"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_5_a" {
+ title = "8.5.a For a sample of system components, examine user ID lists to verify that generic user IDs are disabled or removed, shared user IDs for system administration activities and other critical functions do not exist or are shared, and generic user IDs are not used to administer any system components"
+ description = "Generic user IDs are disabled or removed. Shared user IDs for system administration activities and other critical functions do not exist. Shared and generic user IDs are not used to administer any system components. If multiple users share the same authentication credentials (for example, user account and password), it becomes impossible to trace system access and activities to an individual. This in turn prevents an entity from assigning accountability for, or having effective logging of, an individual's actions, since a given action could have been performed by anyone in the group that has knowledge of the authentication credentials."
+
+ children = [
+ control.iam_root_user_no_access_keys
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.5.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_6" {
+ title = "8.6 Where other authentication mechanisms are used (for example, physical or logical security tokens, smart cards, certificates, etc.), use of these mechanisms must be assigned authentication mechanisms must be assigned to an individual account and not shared among multiple accounts, physical and/or logical controls must be in place to ensure only the intended account can use that mechanism to gain access"
+ description = "If user authentication mechanisms such as tokens, smart cards, and certificates can be used by multiple accounts, it may be impossible to identify the individual using the authentication mechanism. Having physical and/or logical controls (for example, a PIN, biometric data, or a password) to uniquely identify the user of the account will prevent unauthorized users from gaining access through use of a shared authentication mechanism."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_6_c
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.6"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_6_c" {
+ title = "8.6.c Examine system configuration settings and/or physical controls, as applicable, to verify that controls are implemented to ensure only the intended account can use that mechanism to gain access"
+ description = "If user authentication mechanisms such as tokens, smart cards, and certificates can be used by multiple accounts, it may be impossible to identify the individual using the authentication mechanism. Having physical and/or logical controls (for example, a PIN, biometric data, or a password) to uniquely identify the user of the account will prevent unauthorized users from gaining access through use of a shared authentication mechanism."
+
+ children = [
+ control.iam_root_user_hardware_mfa_enabled,
+ control.iam_user_console_access_mfa_enabled,
+ control.iam_user_mfa_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.6.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_7" {
+ title = "8.7 All access to any database containing cardholder data (including access by applications, administrators, and all other users) is restricted"
+ description = "Access to any database containing cardholder data is restricted as follows: all user access to, user queries of, and user actions on databases are through programmatic methods. Only database administrators have the ability to directly access or query databases. Application IDs for database applications can only be used by the applications (and not by individual users or other non-application processes). Without user authentication for access to databases and applications, the potential for unauthorized or malicious access increases, and such access cannot be logged since the user has not been authenticated and is therefore not known to the system. Also, database access should be granted through programmatic methods only (for example, through stored procedures), rather than via direct access to the database by end users (except for DBAs, who may need direct access to the database for their administrative duties)."
+
+ children = [
+ benchmark.pci_dss_v321_requirement_8_7_a,
+ benchmark.pci_dss_v321_requirement_8_7_b,
+ benchmark.pci_dss_v321_requirement_8_7_c,
+ benchmark.pci_dss_v321_requirement_8_7_d
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.7"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_7_a" {
+ title = "8.7.a Review database and application configuration settings and verify that all users are authenticated prior to access"
+ description = "Without user authentication for access to databases and applications, the potential for unauthorized or malicious access increases, and such access cannot be logged since the user has not been authenticated and is therefore not known to the system. Also, database access should be granted through programmatic methods only (for example, through stored procedures), rather than via direct access to the database by end users (except for DBAs, who may need direct access to the database for their administrative duties)."
+
+ children = [
+ control.rds_db_cluster_iam_authentication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.7.a"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_7_b" {
+ title = "8.7.b Examine database and application configuration settings to verify that all user access to, user queries of, and user actions on (for example, move, copy, delete), the database are through programmatic methods only (for example, through stored procedures)"
+ description = "Without user authentication for access to databases and applications, the potential for unauthorized or malicious access increases, and such access cannot be logged since the user has not been authenticated and is therefore not known to the system. Also, database access should be granted through programmatic methods only (for example, through stored procedures), rather than via direct access to the database by end users (except for DBAs, who may need direct access to the database for their administrative duties)."
+
+ children = [
+ control.rds_db_cluster_iam_authentication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.7.b"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_7_c" {
+ title = "8.7.c Examine database access control settings and database application configuration settings to verify that user direct access to or queries of databases are restricted to database administrators"
+ description = "Without user authentication for access to databases and applications, the potential for unauthorized or malicious access increases, and such access cannot be logged since the user has not been authenticated and is therefore not known to the system. Also, database access should be granted through programmatic methods only (for example, through stored procedures), rather than via direct access to the database by end users (except for DBAs, who may need direct access to the database for their administrative duties)."
+
+ children = [
+ control.rds_db_cluster_iam_authentication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.7.c"
+ })
+}
+
+benchmark "pci_dss_v321_requirement_8_7_d" {
+ title = "8.7.d Examine database access control settings, database application configuration settings, and the related application IDs to verify that application IDs can only be used by the applications (and not by individual users or other processes)"
+ description = "Without user authentication for access to databases and applications, the potential for unauthorized or malicious access increases, and such access cannot be logged since the user has not been authenticated and is therefore not known to the system. Also, database access should be granted through programmatic methods only (for example, through stored procedures), rather than via direct access to the database by end users (except for DBAs, who may need direct access to the database for their administrative duties)."
+
+ children = [
+ control.rds_db_cluster_iam_authentication_enabled
+ ]
+
+ tags = merge(local.pci_dss_v321_requirement_8_common_tags, {
+ pci_dss_v321_item_id = "8.7.d"
+ })
+}
diff --git a/pci_v321/autoscaling.sp b/pci_v321/autoscaling.sp
deleted file mode 100644
index 3e956c3b..00000000
--- a/pci_v321/autoscaling.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_autoscaling_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/AutoScaling"
- })
-}
-
-benchmark "pci_v321_autoscaling" {
- title = "Auto Scaling"
- documentation = file("./pci_v321/docs/pci_v321_autoscaling_1.md")
- children = [
- control.pci_v321_autoscaling_1,
- ]
-
- tags = merge(local.pci_v321_autoscaling_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_autoscaling_1" {
- title = "1 Auto Scaling groups associated with a load balancer should use health checks"
- description = "This control checks whether your Auto Scaling groups that are associated with a load balancer are using Elastic Load Balancing health checks. PCI DSS does not require load balancing or highly available configurations. However, this check aligns with AWS best practices."
- severity = "low"
- query = query.autoscaling_group_with_lb_use_health_check
- documentation = file("./pci_v321/docs/pci_v321_autoscaling_1.md")
-
- tags = merge(local.pci_v321_autoscaling_common_tags, {
- pci_item_id = "autoscaling_1"
- pci_requirements = "2.2"
- })
-}
diff --git a/pci_v321/cloudtrail.sp b/pci_v321/cloudtrail.sp
deleted file mode 100644
index 97f534ac..00000000
--- a/pci_v321/cloudtrail.sp
+++ /dev/null
@@ -1,72 +0,0 @@
-locals {
- pci_v321_cloudtrail_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/CloudTrail"
- })
-}
-
-benchmark "pci_v321_cloudtrail" {
- title = "CloudTrail"
- documentation = file("./pci_v321/docs/pci_v321_cloudtrail.md")
- children = [
- control.pci_v321_cloudtrail_1,
- control.pci_v321_cloudtrail_2,
- control.pci_v321_cloudtrail_3,
- control.pci_v321_cloudtrail_4
- ]
-
- tags = merge(local.pci_v321_cloudtrail_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_cloudtrail_1" {
- title = "1 CloudTrail logs should be encrypted at rest using AWS KMS CMKs"
- description = "This control checks whether AWS CloudTrail is configured to use the server-side encryption (SSE) AWS KMS customer master key (CMK) encryption. If you are only using the default encryption option, you can choose to disable this check."
- severity = "medium"
- query = query.cloudtrail_trail_logs_encrypted_with_kms_cmk
- documentation = file("./pci_v321/docs/pci_v321_cloudtrail_1.md")
-
- tags = merge(local.pci_v321_cloudtrail_common_tags, {
- pci_item_id = "cloudtrail_1"
- pci_requirements = "3.4"
- })
-}
-
-control "pci_v321_cloudtrail_2" {
- title = "2 CloudTrail should be enabled"
- description = "This control checks whether CloudTrail is enabled in your AWS account. However, some AWS services do not enable logging of all APIs and events. You should implement any additional audit trails other than CloudTrail and review the documentation for each service."
- severity = "high"
- query = query.cloudtrail_trail_enabled
- documentation = file("./pci_v321/docs/pci_v321_cloudtrail_2.md")
-
- tags = merge(local.pci_v321_cloudtrail_common_tags, {
- pci_item_id = "cloudtrail_2"
- pci_requirements = "10.1,10.2.1,10.2.2,10.2.3,10.2.4,10.2.5,10.2.6,10.2.7,10.3.1,10.3.2,10.3.3,10.3.4,10.3.5,10.3.6"
- })
-}
-
-control "pci_v321_cloudtrail_3" {
- title = "3 CloudTrail log file validation should be enabled"
- description = "This control checks whether CloudTrail log file validation is enabled."
- severity = "low"
- query = query.cloudtrail_trail_validation_enabled
- documentation = file("./pci_v321/docs/pci_v321_cloudtrail_3.md")
-
- tags = merge(local.pci_v321_cloudtrail_common_tags, {
- pci_item_id = "cloudtrail_3"
- pci_requirements = "10.5.2,10.5.5"
- })
-}
-
-control "pci_v321_cloudtrail_4" {
- title = "4 CloudTrail trails should be integrated with CloudWatch Logs"
- description = "This control checks whether CloudTrail trails are configured to send logs to CloudWatch Logs."
- severity = "low"
- query = query.cloudtrail_trail_integrated_with_logs
- documentation = file("./pci_v321/docs/pci_v321_cloudtrail_4.md")
-
- tags = merge(local.pci_v321_cloudtrail_common_tags, {
- pci_item_id = "cloudtrail_4"
- pci_requirements = "10.5.3"
- })
-}
diff --git a/pci_v321/codebuild.sp b/pci_v321/codebuild.sp
deleted file mode 100644
index ad672a2c..00000000
--- a/pci_v321/codebuild.sp
+++ /dev/null
@@ -1,44 +0,0 @@
-locals {
- pci_v321_codebuild_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/CodeBuild"
- })
-}
-
-benchmark "pci_v321_codebuild" {
- title = "CodeBuild"
- documentation = file("./pci_v321/docs/pci_v321_codebuild.md")
- children = [
- control.pci_v321_codebuild_1,
- control.pci_v321_codebuild_2
- ]
-
- tags = merge(local.pci_v321_codebuild_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_codebuild_1" {
- title = "1 CodeBuild GitHub or Bitbucket source repository URLs should use OAuth"
- description = "This control checks whether the GitHub or Bitbucket source repository URL contains either personal access tokens or a user name and password."
- severity = "critical"
- query = query.codebuild_project_source_repo_oauth_configured
- documentation = file("./pci_v321/docs/pci_v321_codebuild_1.md")
-
- tags = merge(local.pci_v321_codebuild_common_tags, {
- pci_item_id = "codebuild_1"
- pci_requirements = "8.2.1"
- })
-}
-
-control "pci_v321_codebuild_2" {
- title = "2 CodeBuild project environment variables should not contain clear text credentials"
- description = "This control checks whether the project contains environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY."
- severity = "critical"
- query = query.codebuild_project_plaintext_env_variables_no_sensitive_aws_values
- documentation = file("./pci_v321/docs/pci_v321_codebuild_2.md")
-
- tags = merge(local.pci_v321_codebuild_common_tags, {
- pci_item_id = "codebuild_2"
- pci_requirements = "8.2.1"
- })
-}
diff --git a/pci_v321/config.sp b/pci_v321/config.sp
deleted file mode 100644
index 0a5cd90b..00000000
--- a/pci_v321/config.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_config_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/Config"
- })
-}
-
-benchmark "pci_v321_config" {
- title = "Config"
- documentation = file("./pci_v321/docs/pci_v321_config.md")
- children = [
- control.pci_v321_config_1
- ]
-
- tags = merge(local.pci_v321_config_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_config_1" {
- title = "1 AWS Config should be enabled"
- description = "This control checks whether AWS Config is enabled in the account for the local Region and is recording all resources. It does not check for change detection for all critical system files and content files, as AWS Config supports only a subset of resource types. The AWS Config service performs configuration management of supported AWS resources in your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items, and any configuration changes between resources."
- severity = "medium"
- query = query.config_enabled_all_regions
- documentation = file("./pci_v321/docs/pci_v321_config_1.md")
-
- tags = merge(local.pci_v321_config_common_tags, {
- pci_item_id = "config_1"
- pci_requirements = "10.5.2,11.5"
- })
-}
diff --git a/pci_v321/cw.sp b/pci_v321/cw.sp
deleted file mode 100644
index 09d3aacf..00000000
--- a/pci_v321/cw.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_cw_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/CloudWatch"
- })
-}
-
-benchmark "pci_v321_cw" {
- title = "CloudWatch"
- documentation = file("./pci_v321/docs/pci_v321_cw.md")
- children = [
- control.pci_v321_cw_1
- ]
-
- tags = merge(local.pci_v321_cw_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_cw_1" {
- title = "1 A log metric filter and alarm should exist for usage of the 'root' user"
- description = "This control checks for the CloudWatch metric filters using the following pattern: { $.userIdentity.type = 'Root' && $.userIdentity.invokedBy NOT EXISTS && $.eventType != AwsServiceEvent }."
- severity = "critical"
- query = query.log_metric_filter_root_login
- documentation = file("./pci_v321/docs/pci_v321_cw_1.md")
-
- tags = merge(local.pci_v321_cw_common_tags, {
- pci_item_id = "cw_1"
- pci_requirements = "7.2.1"
- })
-}
diff --git a/pci_v321/dms.sp b/pci_v321/dms.sp
deleted file mode 100644
index 6944b5f2..00000000
--- a/pci_v321/dms.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_dms_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/DMS"
- })
-}
-
-benchmark "pci_v321_dms" {
- title = "DMS"
- documentation = file("./pci_v321/docs/pci_v321_dms.md")
- children = [
- control.pci_v321_dms_1
- ]
-
- tags = merge(local.pci_v321_dms_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_dms_1" {
- title = "1 AWS Database Migration Service replication instances should not be public"
- description = "This control checks whether AWS DMS replication instances are public. To do this, it examines the value of the PubliclyAccessible field. A private replication instance has a private IP address that you cannot access outside of the replication network. A replication instance should have a private IP address when the source and target databases are in the same network, and the network is connected to the replication instance's VPC using a VPN, AWS Direct Connect, or VPC peering."
- severity = "critical"
- query = query.dms_replication_instance_not_publicly_accessible
- documentation = file("./pci_v321/docs/pci_v321_dms_1.md")
-
- tags = merge(local.pci_v321_dms_common_tags, {
- pci_item_id = "dms_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
diff --git a/pci_v321/docs/pci_overview.md b/pci_v321/docs/pci_overview.md
deleted file mode 100644
index f257c5c0..00000000
--- a/pci_v321/docs/pci_overview.md
+++ /dev/null
@@ -1,45 +0,0 @@
-To obtain the latest version of the official guide, please visit https://www.pcisecuritystandards.org/document_library?category=pcidss&document=pci_dss.
-
-## Overview
-
-The Payment Card Industry Data Security Standard (PCI DSS) was developed to
-encourage and enhance cardholder data security and facilitate the broad
-adoption of consistent data security measures globally. PCI DSS provides a
-baseline of technical and operational requirements designed to protect account
-data. PCI DSS applies to all entities involved in payment card
-processing—including merchants, processors, acquirers, issuers, and service
-providers. PCI DSS also applies to all other entities that store, process or
-transmit cardholder data (CHD) and/or sensitive authentication data (SAD).
-Below is a high-level overview of the 12 PCI DSS requirements.
-
-### PCI Data Security Standard – High Level Overview
-
-Build and Maintain a Secure Network and Systems
-
-1. Install and maintain a firewall configuration to protect cardholder data
-2. Do not use vendor-supplied defaults for system passwords and other security parameters
-
-Protect Cardholder Data
-
-3. Protect stored cardholder data
-4. Encrypt transmission of cardholder data across open, public networks
-
-Maintain a Vulnerability Management Program
-
-5. Protect all systems against malware and regularly update anti-virus software or programs
-6. Develop and maintain secure systems and applications
-
-Implement Strong Access Control Measures
-
-7. Restrict access to cardholder data by business need to know
-8. Identify and authenticate access to system components
-9. Restrict physical access to cardholder data
-
-Regularly Monitor and Test Networks
-
-10. Track and monitor all access to network resources and cardholder data
-11. Regularly test security systems and processes
-
-Maintain an Information Security Policy
-
-12. Maintain a policy that addresses information security for all personnel
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_autoscaling.md b/pci_v321/docs/pci_v321_autoscaling.md
deleted file mode 100644
index 5fdb38a1..00000000
--- a/pci_v321/docs/pci_v321_autoscaling.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring Autoscaling resources and options.
diff --git a/pci_v321/docs/pci_v321_autoscaling_1.md b/pci_v321/docs/pci_v321_autoscaling_1.md
deleted file mode 100644
index a8eb77e4..00000000
--- a/pci_v321/docs/pci_v321_autoscaling_1.md
+++ /dev/null
@@ -1,17 +0,0 @@
-## Description
-
-This control checks whether your Auto Scaling groups that are associated with a load balancer are using Elastic Load Balancing health checks.
-
-PCI DSS does not require load balancing or highly available configurations. However, this check aligns with AWS best practices.
-
-## Remediation
-
-To enable Elastic Load Balancing health checks
-
-1. Open the Amazon [EC2 console](https://console.aws.amazon.com/ec2/)
-2. On the navigation pane, under `Auto Scaling`, choose **Auto Scaling Groups**
-3. To select the group from the list, choose the right box
-4. Choose **Edit**
-5. For `Health Check Type`, choose **ELB**
-6. For `Health Check Grace Period`, enter `300`
-7. Choose **Save**
diff --git a/pci_v321/docs/pci_v321_cloudtrail.md b/pci_v321/docs/pci_v321_cloudtrail.md
deleted file mode 100644
index d9ed1200..00000000
--- a/pci_v321/docs/pci_v321_cloudtrail.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring CloudTrail resources and options.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_cloudtrail_1.md b/pci_v321/docs/pci_v321_cloudtrail_1.md
deleted file mode 100644
index be086f37..00000000
--- a/pci_v321/docs/pci_v321_cloudtrail_1.md
+++ /dev/null
@@ -1,20 +0,0 @@
-## Description
-
-This control checks whether AWS CloudTrail is configured to use the server-side encryption (SSE) AWS KMS customer master key (CMK) encryption.
-
-If you are only using the default encryption option, you can choose to disable this check.
-
-## Remediation
-
-To enable encryption for CloudTrail logs
-
-1. Open the CloudTrail console at [CloudTrail](https://console.aws.amazon.com/cloudtrail/).
-1. Choose **Trails**.
-1. Choose the trail to update.
-1. Under General details, choose **Edit**.
-1. For Log file SSE-KMS encryption, select **Enabled**.
-1. Under AWS KMS customer managed CMK, do one of the following:
- - To create a key, choose **New**. Then in AWS KMS alias, enter an alias for the key. The key is created in the same Region as the S3 bucket.
- - To use an existing key, choose **Existing** and then from AWS KMS alias, select the key.
- - The AWS KMS key and S3 bucket must be in the same Region.
-1. Choose **Save changes**.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_cloudtrail_2.md b/pci_v321/docs/pci_v321_cloudtrail_2.md
deleted file mode 100644
index 6d1eba03..00000000
--- a/pci_v321/docs/pci_v321_cloudtrail_2.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## Description
-
-This control checks whether CloudTrail is enabled in your AWS account.
-
-However, some AWS services do not enable logging of all APIs and events. You should implement any additional audit trails other than CloudTrail and review the documentation for each service in CloudTrail Supported Services and Integrations.
-
-## Remediation
-
-To create a new trail in CloudTrail
-
-1. Sign in to the AWS Management Console using the IAM user you configured for CloudTrail administration.
-2. Open the CloudTrail console at [CloudTrail](https://console.aws.amazon.com/cloudtrail/).
-3. In the Region selector, choose the AWS Region where you want your trail to be created. This is the Home Region for the trail.
-4. The Home Region is the only AWS Region where you can view and update the trail after it is created, even if the trail logs events in all AWS Regions.
-5. In the navigation pane, choose **Trails**.
-6. On the Trails page, choose **Get Started Now**. If you do not see that option, choose **Create Trail**.
-7. In Trail name, give your trail a name, such as My-Management-Events-Trail.
-8. As a best practice, use a name that quickly identifies the purpose of the trail. In this case, you're creating a trail that logs management events.
-9. In Management Events, make sure Read/Write events is set to **All**.
-10. In Data Events, do not make any changes. This trail will not log any data events.
-11. Create a new S3 bucket for the logs:
- 1. In Storage Location, in Create a new S3 bucket, choose **Yes**.
- 2. In S3 bucket, give your bucket a name, such as my-bucket-for-storing-cloudtrail-logs.
- 3. The name of your S3 bucket must be globally unique. For more information about S3 bucket naming requirements, see the [AWS CloudTrail User Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html).
-12. Under Advanced, choose **Yes** for both Encrypt log files with SSE-KMS and Enable log file validation.
-13. Choose **Create**.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_cloudtrail_3.md b/pci_v321/docs/pci_v321_cloudtrail_3.md
deleted file mode 100644
index 55ae0ae7..00000000
--- a/pci_v321/docs/pci_v321_cloudtrail_3.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks whether CloudTrail log file validation is enabled.
-
-It does not check when configurations are altered.
-
-To monitor and alert on log file changes, you can use Amazon EventBridge or CloudWatch metric filters.
-
-## Remediation
-
-To enable CloudTrail log file validation
-
-1. Open the CloudTrail console at [CloudTrail](https://console.aws.amazon.com/cloudtrail/).
-1. In the navigation pane, choose **Trails**.
-1. In the Name column, choose the **Trail Name** to edit.
-1. Under General details, choose **Edit**.
-1. Under Additional settings, for Log file validation,, select **Enabled**.
-1. Choose **Save**.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_cloudtrail_4.md b/pci_v321/docs/pci_v321_cloudtrail_4.md
deleted file mode 100644
index eda2e687..00000000
--- a/pci_v321/docs/pci_v321_cloudtrail_4.md
+++ /dev/null
@@ -1,27 +0,0 @@
-## Description
-
-This control checks whether CloudTrail trails are configured to send logs to CloudWatch Logs.
-
-It does not check for user permissions to alter logs or log groups. You should create specific CloudWatch rules to alert when CloudTrail logs are altered.
-
-This control also does not check for any additional audit log sources other than CloudTrail being sent to a CloudWatch Logs group.
-
-## Remediation
-
-To enable CloudTrail log file validation
-
-1. Open the CloudTrail console at [CloudTrail](https://console.aws.amazon.com/cloudtrail/).
-1. In the navigation pane, choose **Trails**.
-1. Choose a trail that there is no value for in the **CloudWatch Logs Log group** column.
-1. Scroll down to the **CloudWatch Logs** section and then choose **Edit**.
-1. For Log group field, do one of the following:
- - To use the default log group, keep the name as is.
- - To use an existing log group, choose **Existing** and then enter the name of the log group to use.
- - To create a new log group, choose **New** and then enter a name for the log group to create.
-1. Choose **Continue**.
-1. For IAM role, do one of the following:
- - To use an existing role, choose **Existing** and then choose the role from the drop-down list.
- - To create a new role, choose **New** and then enter a name for the role to create.
- - The new role is assigned a policy that grants the necessary permissions.
- To view the permissions granted to the role, expand the **Policy document**.
-1. Choose **Save** changes.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_codebuild.md b/pci_v321/docs/pci_v321_codebuild.md
deleted file mode 100644
index e951e156..00000000
--- a/pci_v321/docs/pci_v321_codebuild.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring CodeBuild resources and options.
diff --git a/pci_v321/docs/pci_v321_codebuild_1.md b/pci_v321/docs/pci_v321_codebuild_1.md
deleted file mode 100644
index 1712dcec..00000000
--- a/pci_v321/docs/pci_v321_codebuild_1.md
+++ /dev/null
@@ -1,20 +0,0 @@
-## Description
-
-This control checks whether the GitHub or Bitbucket source repository URL contains either personal access tokens or a user name and password.
-
-You can use CodeBuild in your PCI DSS environment to compile your source code, run unit tests, or produce artifacts that are ready to deploy. If you do, your authentication credentials should never be stored or transmitted in clear text or appear in the repository URL.
-
-You should use OAuth instead of personal access tokens or a user name and password to grant authorization for accessing GitHub or Bitbucket repositories. This is a method to use strong cryptography to render authentication credentials unreadable.
-
-## Remediation
-
-To remove basic authentication / (GitHub) Personal Access Token from CodeBuild Project Source
-
-1. Open the [CodeBuild console]()
-2. Select your Build project that contains personal access tokens or a user name and password.
-3. From **Edit**, choose Source.
-4. Choose **Disconnect** from `GitHub / Bitbucket`.
-5. Choose **Connect** using `OAuth` and then choose Connect to `GitHub / Bitbucket`.
-6. In the message displayed by your `source provider`, authorize as appropriate.
-7. Reconfigure your Repository URL and additional configuration settings, as needed.
-8. Choose **Update** source.
diff --git a/pci_v321/docs/pci_v321_codebuild_2.md b/pci_v321/docs/pci_v321_codebuild_2.md
deleted file mode 100644
index 55cd67d8..00000000
--- a/pci_v321/docs/pci_v321_codebuild_2.md
+++ /dev/null
@@ -1,33 +0,0 @@
-## Description
-
-This control checks whether the project contains environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
-
-You can use CodeBuild in your PCI DSS environment to compile your source code, runs unit tests, or produce artifacts that are ready to deploy. If you do, never store the authentication credentials AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in clear text.
-
-Using environmental variables to store credentials in your CodeBuild project may violate the requirement to use strong cryptography to render authentication credentials unreadable.
-
-## Remediation
-
-To enable Elastic Load Balancing health checks
-
-1. Open the [CodeBuild console]()
-2. Expand **Build**, choose Build project, and then choose the build project that contains plaintext credentials.
-3. From **Edit**, choose `Environment`.
-4. Expand `Additional configuration` and then scroll to `Environment variables`.
-5. Choose **Remove** next to the environment variable.
-6. Choose **Update environment**.
-
-To store sensitive values in the Amazon EC2 Systems Manager Parameter Store and then retrieve them from your build spec
-
-1. Open the [CodeBuild console]()
-2. Expand **Build**, choose `Build project`, and then choose your build project that contains plaintext credentials.
-3. From **Edit**, choose **Environment**.
-4. Expand `Additional configuration` and then scroll to **Environment variables**.
-5. In AWS Systems Manager, create a Systems Manager parameter that contains your sensitive data. For instructions on how to do this, refer to the tutorial in the [AWS Systems Manager User Guide](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-console.html).
-6. After you create the parameter, copy the parameter name.
-7. Back in the CodeBuild console, choose **Create environmental variable**.
-8. For **name**, enter the name of your variable as it appears in your build spec.
-9. For **value**, paste in the name of your parameter.
-10. From **type**, choose **Parameter**.
-11. Choose **Remove** next to your noncompliant environmental variable that contains plaintext credentials.
-12. Choose **Update environment**.
diff --git a/pci_v321/docs/pci_v321_config.md b/pci_v321/docs/pci_v321_config.md
deleted file mode 100644
index 9cfab535..00000000
--- a/pci_v321/docs/pci_v321_config.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS Config.
diff --git a/pci_v321/docs/pci_v321_config_1.md b/pci_v321/docs/pci_v321_config_1.md
deleted file mode 100644
index 624c6538..00000000
--- a/pci_v321/docs/pci_v321_config_1.md
+++ /dev/null
@@ -1,29 +0,0 @@
-## Description
-
-AWS Config rule: None. To run this check, Security Hub runs through audit steps prescribed for it in Securing Amazon Web Services. No AWS Config managed rules are created in your AWS environment for this check.
-
-This control checks whether AWS Config is enabled in the account for the local Region and is recording all resources.
-
-It does not check for change detection for all critical system files and content files, as AWS Config supports only a subset of resource types.
-
-The AWS Config service performs configuration management of supported AWS resources in your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items, and any configuration changes between resources.
-
-## Remediation
-
-To configure AWS Config settings
-
-1. Open the [AWS Config console](https://console.aws.amazon.com/config/).
-2. Choose the Region to configure AWS Config in.
-3. If you have not used AWS Config before, choose **Get started**.
-4. On the Settings page, do the following:
- 1. Under Resource types to record, choose Record all resources supported in this region and Include global resources (e.g., AWS IAM resources).
- 2. Under Amazon S3 bucket, either specify the bucket to use or create a bucket and optionally include a prefix.
- 3. Under Amazon SNS topic, either select an Amazon SNS topic from your account or create one. For more information about Amazon SNS, see the [Amazon Simple Notification Service Getting Started Guide](https://docs.aws.amazon.com/sns/latest/dg/sns-getting-started.html).
- 4. Under AWS Config role, either choose `Create AWS Config service-linked role` or choose `Choose a role from your account` and then choose the role to use.
-5. Choose Next.
-6. On the **AWS Config** rules page, choose **Skip**.
-7. Choose **Confirm**.
-
-For more information about using AWS Config from the AWS CLI, see the [AWS Config Developer Guide](https://docs.aws.amazon.com/config/latest/developerguide/gs-cli-subscribe.html).
-
-You can also use an AWS CloudFormation template to automate this process. For more information, see the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-sampletemplates.html).
diff --git a/pci_v321/docs/pci_v321_cw.md b/pci_v321/docs/pci_v321_cw.md
deleted file mode 100644
index 0e1dbe66..00000000
--- a/pci_v321/docs/pci_v321_cw.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring CloudWatch resources and options.
diff --git a/pci_v321/docs/pci_v321_cw_1.md b/pci_v321/docs/pci_v321_cw_1.md
deleted file mode 100644
index 53ea04db..00000000
--- a/pci_v321/docs/pci_v321_cw_1.md
+++ /dev/null
@@ -1,51 +0,0 @@
-## Description
-
-This control checks for the CloudWatch metric filters using the following pattern:
-
-```
-{ $.userIdentity.type = "Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != "AwsServiceEvent" }
-```
-
-It checks the following:
-
- - The log group name is configured for use with active multi-Region CloudTrail.
- - There is at least one Event Selector for a Trail with IncludeManagementEvents set to true and ReadWriteType set to All.
- - There is at least one active subscriber to an Amazon SNS topic associated with the alarm.
-
-## Remediation
-
-The steps to remediate this issue include setting up an Amazon SNS topic, a metric filter, and an alarm for the metric filter.
-
-To create an Amazon SNS topic
-
-1. Open the Amazon SNS console at https://console.aws.amazon.com/sns/v3/home.
-2. Create an Amazon SNS topic that receives all CIS alarms.
-3. Create at least one subscriber to the topic.
-4. For more information about creating Amazon SNS topics, see the Amazon Simple Notification Service Developer Guide.
-5. Set up an active CloudTrail trail that applies to all Regions.
-6. To do this, follow the remediation steps in CIS v1.3.0 [3.1 Ensure CloudTrail is enabled in all Regions](https://hub.steampipe.io/mods/turbot/aws_compliance/controls/control.cis_v130_3_1).
-7. Make a note of the associated log group name.
-
-To create a metric filter and alarm
-
-1. Open the [CloudWatch console](https://console.aws.amazon.com/cloudwatch/).
-2. Choose Logs, then choose **Log groups**.
-3. Choose the log group where CloudTrail is logging.
-4. On the log group details page, choose **Metric filters**.
-5. Choose **Create metric filter**.
-6. Copy the following pattern and then paste it into Filter pattern.
-
- ```
- {$.userIdentity.type="Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !="AwsServiceEvent"}
- ```
-7. Enter the name of the new filter. For example, RootAccountUsage.
-8. Confirm that the value for **Metric namespace** is `LogMetrics`.
-9. This ensures that all CIS Benchmark metrics are grouped together.
-10. In **Metric name**, enter the name of the metric.
-11. In Metric value, enter 1, and then choose **Next**.
-12. Choose **Create metric filter**.
-13. Next, set up the notification. Select the select the metric filter you just created, then choose **Create alarm**.
-14. Enter the threshold for the alarm (for example, 1), then choose **Next**.
-15. Under Select an SNS topic, for Send notification to, choose an email list, then choose Next.
-16. Enter a **Name and Description** for the alarm, such as `RootAccountUsageAlarm`, then choose **Next**.
-17. Choose **Create Alarm**.
diff --git a/pci_v321/docs/pci_v321_dms.md b/pci_v321/docs/pci_v321_dms.md
deleted file mode 100644
index f30fba08..00000000
--- a/pci_v321/docs/pci_v321_dms.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS DMS resources and options.
diff --git a/pci_v321/docs/pci_v321_dms_1.md b/pci_v321/docs/pci_v321_dms_1.md
deleted file mode 100644
index 51a29287..00000000
--- a/pci_v321/docs/pci_v321_dms_1.md
+++ /dev/null
@@ -1,22 +0,0 @@
-## Description
-
-This control checks whether AWS DMS replication instances are public. To do this, it examines the value of the PubliclyAccessible field.
-
-A private replication instance has a private IP address that you cannot access outside of the replication network. A replication instance should have a private IP address when the source and target databases are in the same network, and the network is connected to the replication instance's VPC using a VPN, AWS Direct Connect, or VPC peering.
-
-You should also ensure that access to your AWS DMS instance configuration is limited to only authorized users. To do this, restrict users’ IAM permissions to modify AWS DMS settings and resources.
-
-## Remediation
-
-**Note** that you cannot change the public access setting once a replication instance is created. It must be deleted and recreated.
-
-To configure the AWS DMS replication instances setting to be not publicly accessible
-
-1. Open the [AWS Database Migration Service console](https://console.aws.amazon.com/dms/).
-2. In the left navigation pane, under `Resource management`, navigate to `Replication instances`.
-3. To delete the public instance, select the check box for the instance, choose **Actions**, then choose **delete**.
-4. Choose Create replication instance. Provide the configuration details.
-5. To disable public access, make sure that Publicly accessible is not selected.
-6. Choose **Create**.
-
-For more information, see the section on [Creating a replication instance](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.Creating) in the AWS Database Migration Service User Guide.
diff --git a/pci_v321/docs/pci_v321_ec2.md b/pci_v321/docs/pci_v321_ec2.md
deleted file mode 100644
index e8bfa2a4..00000000
--- a/pci_v321/docs/pci_v321_ec2.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring EC2 resources and options.
diff --git a/pci_v321/docs/pci_v321_ec2_1.md b/pci_v321/docs/pci_v321_ec2_1.md
deleted file mode 100644
index adc120c8..00000000
--- a/pci_v321/docs/pci_v321_ec2_1.md
+++ /dev/null
@@ -1,13 +0,0 @@
-## Description
-
-This control checks whether Amazon Elastic Block Store snapshots are not publicly restorable by everyone, which makes them public. Amazon EBS snapshots should not be publicly restorable by everyone unless you explicitly allow it, to avoid accidental exposure of your company’s sensitive data.
-
-You should also ensure that permission to change Amazon EBS configurations are restricted to authorized AWS accounts only. Learn more about managing Amazon EBS snapshot permissions in the [Amazon EC2 User Guide for Linux Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html).
-
-## Remediation
-
-1. Open the [Amazon EC2 console](https://console.aws.amazon.com/ec2/).
-2. In the navigation pane, under `Elastic Block Store`, choose **Snapshots** and then select your public snapshot.
-3. Choose **Permissions** tab from metadata view section, click **Edit**
-4. Choose **Private** in `This snapshot is currently`
-5. Choose **Save**
diff --git a/pci_v321/docs/pci_v321_ec2_2.md b/pci_v321/docs/pci_v321_ec2_2.md
deleted file mode 100644
index 5b0a6d9f..00000000
--- a/pci_v321/docs/pci_v321_ec2_2.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks that the default security group of a VPC does not allow inbound or outbound traffic.
-
-It does not check for access restrictions for other security groups that are not default, and other VPC configurations.
-
-## Remediation
-
-To remediate this issue, create new security groups and assign those security groups to your resources. To prevent the default security groups from being used, remove their inbound and outbound rules.
-
-1. Open the [Amazon VPC console](https://console.aws.amazon.com/vpc/).
-2. In the navigation pane, choose `Security groups`. View the `default security groups` details to see the resources that are assigned to them.
-3. Select a default security group, and choose the **Inbound rules** tab. Choose **Edit** inbound rules. Then delete all of the inbound rules. Choose **Save rules**.
-4. Repeat the previous step for each default security group.
-5. Select a default security group and choose the **Outbound rules** tab. Choose **Edit** outbound rules. Then delete all of the outbound rules. Choose **Save rules**.
-6. Repeat the previous step for each default security group.
-
-Create a set of least-privilege security groups for the resources. For details on how to create security groups, see [Creating a security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#CreatingSecurityGroups) in the Amazon VPC User Guide.
diff --git a/pci_v321/docs/pci_v321_ec2_3.md b/pci_v321/docs/pci_v321_ec2_3.md
deleted file mode 100644
index 46453c61..00000000
--- a/pci_v321/docs/pci_v321_ec2_3.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Description
-
-This control helps you maintain an accurate asset inventory of needed security groups in your cardholder data environment (CDE). It does so by checking that security groups are attached to Amazon EC2 instances or to an ENI. A failed finding indicates you may have unused Amazon EC2 security groups.
-
-Unless there is a business need to retain them, you should remove unused resources to maintain an accurate inventory of system components.
-
-## Remediation
-
-You must perform the following steps for each security group not attached to an ENI.
-
-1. Open the Amazon [VPC console](https://console.aws.amazon.com/vpc/)
-2. In the navigation pane, under Security, choose **Security groups**.
-3. Select the check box for the security group to **delete**.
-4. From **Actions**, choose **Delete security group**.
-5. Choose **Delete**.
diff --git a/pci_v321/docs/pci_v321_ec2_4.md b/pci_v321/docs/pci_v321_ec2_4.md
deleted file mode 100644
index 1d476388..00000000
--- a/pci_v321/docs/pci_v321_ec2_4.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks whether Elastic IP addresses that are allocated to a VPC are attached to Amazon EC2 instances or in-use elastic network interfaces (ENIs).
-
-A failed finding indicates you may have unused Amazon EC2 EIPs.
-
-This will help you maintain an accurate asset inventory of EIPs in your cardholder data environment (CDE). Unless there is a business need to retain them, you should remove unused resources to maintain an accurate inventory of system components.
-
-## Remediation
-
-To remediate this issue, create new security groups and assign those security groups to your resources. To prevent the default security groups from being used, remove their inbound and outbound rules.
-
-1. Open the [Amazon EC2 console](https://console.aws.amazon.com/ec2/).
-2. In the navigation pane, under Network & Security, choose Elastic IPs.
-3. Choose the Elastic IP address, choose **Actions**, and then choose **Release Elastic IP address**.
-4. When prompted, choose **Release**.
diff --git a/pci_v321/docs/pci_v321_ec2_5.md b/pci_v321/docs/pci_v321_ec2_5.md
deleted file mode 100644
index 6b996cae..00000000
--- a/pci_v321/docs/pci_v321_ec2_5.md
+++ /dev/null
@@ -1,19 +0,0 @@
-## Description
-
-This control checks whether security groups in use disallow unrestricted incoming SSH traffic.
-
-It does not evaluate outbound traffic.
-
-Note that security groups are stateful. If you send a request from your instance, the response traffic for that request is allowed to flow in regardless of inbound security group rules. Responses to allowed inbound traffic are allowed to flow out regardless of outbound rules.
-
-## Remediation
-
-Perform the following steps for each security group associated with a VPC.
-
-1. Open the Amazon [VPC console](https://console.aws.amazon.com/vpc/).
-2. In the navigation pane, under Security, choose **Security groups**.
-3. Select a `security group`.
-4. In the bottom section of the page, choose `Inbound rules`.
-5. Choose **Edit** `inbound rules`.
-6. Identify the rule that allows access through port 22 and then choose the `X` to remove it.
-7. Choose **Save** rules.
diff --git a/pci_v321/docs/pci_v321_ec2_6.md b/pci_v321/docs/pci_v321_ec2_6.md
deleted file mode 100644
index 4a66927a..00000000
--- a/pci_v321/docs/pci_v321_ec2_6.md
+++ /dev/null
@@ -1,23 +0,0 @@
-## Description
-
-This control checks whether VPC flow logs are found and enabled for VPCs. The traffic type is set to REJECT.
-
-With VPC Flow Logs, you can capture information about the IP address traffic to and from network interfaces in your VPC. After you create a flow log, you can use CloudWatch Logs to view and retrieve the log data.
-
-Security Hub recommends that you enable flow logging for packet rejects for VPCs. Flow logs provide visibility into network traffic that traverses the VPC. They can detect anomalous traffic and provide insight into security workflows.
-
-By default, the record includes values for the different components of the IP address flow, including the source, destination, and protocol. For more information and descriptions of the log fields, see [VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html)in the Amazon VPC User Guide.
-
-## Remediation
-
-To enable VPC flow logging
-
-1. Open the [Amazon VPC console](https://console.aws.amazon.com/vpc/).
-2. In the navigation pane, under Virtual Private Cloud, choose Your VPCs.
-3. Select a `VPC` to update.
-4. At the bottom of the page, choose **Flow Logs**.
-5. Choose **Create** flow log.
-6. For Filter, choose **Reject**.
-7. For Destination log group, choose the `log group` to use.
-8. If you chose `CloudWatch Logs` for your destination log group, for IAM role, choose the IAM role to use.
-9. Choose **Create**.
diff --git a/pci_v321/docs/pci_v321_elbv2.md b/pci_v321/docs/pci_v321_elbv2.md
deleted file mode 100644
index 103c16c1..00000000
--- a/pci_v321/docs/pci_v321_elbv2.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring Elastic Load Balancer resources and options.
diff --git a/pci_v321/docs/pci_v321_elbv2_1.md b/pci_v321/docs/pci_v321_elbv2_1.md
deleted file mode 100644
index 79e69478..00000000
--- a/pci_v321/docs/pci_v321_elbv2_1.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks whether HTTP to HTTPS redirection is configured on all HTTP listeners of Application Load Balancers. The control fails if any of the HTTP listeners of Application Load Balancers do not have HTTP to HTTPS redirection configured.
-
-Before you start to use your Application Load Balancer, you must add one or more listeners. A listener is a process that uses the configured protocol and port to check for connection requests. Listeners support both the HTTP and HTTPS protocols. You can use an HTTPS listener to offload the work of encryption and decryption to your load balancer. To enforce encryption in transit, you should use redirect actions with Application Load Balancers to redirect client HTTP requests to an HTTPS request on port 443.
-
-## Remediation
-
-To enable VPC flow logging
-
-1. Open the [Amazon EC2 console](https://console.aws.amazon.com/ec2/).
-2. In the navigation pane, under Load Balancing, choose **Load balancers**.
-3. Choose an `Application Load Balancer`.
-4. Choose **Listeners**.
-5. Select the check box for an HTTP listener (port 80 TCP) and then choose **Edit**.
-6. If there is an existing rule, you must delete it. Otherwise, choose **Add action** and then choose **Redirect to....**
-7. Choose `HTTPS` and then enter `443`.
-8. Choose the check mark in a circle symbol and then choose **Update**.
diff --git a/pci_v321/docs/pci_v321_es.md b/pci_v321/docs/pci_v321_es.md
deleted file mode 100644
index 1517ca9e..00000000
--- a/pci_v321/docs/pci_v321_es.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring Elasticsearch resources and options.
diff --git a/pci_v321/docs/pci_v321_es_1.md b/pci_v321/docs/pci_v321_es_1.md
deleted file mode 100644
index ce833a51..00000000
--- a/pci_v321/docs/pci_v321_es_1.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Description
-
-This control checks whether Amazon Elasticsearch Service domains are in a VPC.
-
-It does not evaluate the VPC subnet routing configuration to determine public reachability.
-
-This AWS control also does not check whether the Amazon ES resource-based policy permits public access by other accounts or external entities. You should ensure that Amazon ES domains are not attached to public subnets. See [Resource-based policies](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html#es-ac-types-resource) in the Amazon Elasticsearch Service Developer Guide.
-
-## Remediation
-
-If you create a domain with a public endpoint, you cannot later place it within a VPC. Instead, you must create a new domain and migrate your data.
-
-The reverse is also true. If you create a domain within a VPC, it cannot have a public endpoint. Instead, you must either create another domain or disable this control.
-
-See the information on migrating from public access to VPC access in the [Amazon Elasticsearch Service Developer Guide](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-migrating-public-to-vpc).
diff --git a/pci_v321/docs/pci_v321_es_2.md b/pci_v321/docs/pci_v321_es_2.md
deleted file mode 100644
index 85337fd6..00000000
--- a/pci_v321/docs/pci_v321_es_2.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Description
-
-This control checks whether Amazon ES domains have encryption at rest configuration enabled.
-
-## Remediation
-
-By default, domains do not encrypt data at rest, and you cannot configure existing domains to use the feature.
-
-To enable the feature, you must create another domain and migrate your data. For information about creating domains, see the [Amazon Elasticsearch Service Developer Guide](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains).
-
-Encryption of data at rest requires Amazon ES 5.1 or later. For more information about encrypting data at rest for Amazon ES, see the [Amazon Elasticsearch Service Developer Guide](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html).
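
Before planning a migration, you can verify whether an existing domain already encrypts data at rest; a sketch with a placeholder domain name:

```bash
# Show the encryption-at-rest settings for a domain (my-domain is a placeholder).
aws es describe-elasticsearch-domain \
  --domain-name my-domain \
  --query 'DomainStatus.EncryptionAtRestOptions'
```
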
diff --git a/pci_v321/docs/pci_v321_guardduty.md b/pci_v321/docs/pci_v321_guardduty.md
deleted file mode 100644
index ee5b9eff..00000000
--- a/pci_v321/docs/pci_v321_guardduty.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS GuardDuty resources and options.
diff --git a/pci_v321/docs/pci_v321_guardduty_1.md b/pci_v321/docs/pci_v321_guardduty_1.md
deleted file mode 100644
index 1b61a89b..00000000
--- a/pci_v321/docs/pci_v321_guardduty_1.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Description
-
-This control checks whether Amazon GuardDuty is enabled in your AWS account and Region.
-
-While GuardDuty can be effective against attacks that an intrusion detection system would typically protect, it might not be a complete solution for every environment. This rule also does not check for the generation of alerts to personnel. For more information about GuardDuty, see the [Amazon GuardDuty User Guide](https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html).
-
-## Remediation
-
-To remediate this issue, enable GuardDuty.
-
-For more information, see [Getting started with GuardDuty](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_settingup.html).
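
GuardDuty can also be enabled from the AWS CLI; a minimal sketch for the current account and Region:

```bash
# Create (and enable) a GuardDuty detector in the current Region.
aws guardduty create-detector --enable

# Confirm that a detector now exists.
aws guardduty list-detectors
```
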
diff --git a/pci_v321/docs/pci_v321_iam.md b/pci_v321/docs/pci_v321_iam.md
deleted file mode 100644
index 01abbd24..00000000
--- a/pci_v321/docs/pci_v321_iam.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS IAM resources and options.
diff --git a/pci_v321/docs/pci_v321_iam_1.md b/pci_v321/docs/pci_v321_iam_1.md
deleted file mode 100644
index f82329fc..00000000
--- a/pci_v321/docs/pci_v321_iam_1.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Description
-
-This control checks whether user access keys exist for the root user.
-
-## Remediation
-
-To delete access keys
-
-1. Log in to your account using the root user credentials.
-2. Choose the account name near the top-right corner of the page and then choose **My Security Credentials**.
-3. In the pop-up warning, choose **Continue to Security Credentials**.
-4. Choose `Access keys (access key ID and secret access key)`.
-5. To permanently delete the key, choose **Delete** and then choose **Yes**. You cannot recover deleted keys.
-6. If there is more than one root user access key, then repeat steps 4 and 5 for each key.
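
To check for a root access key without signing in to the console, the account summary can be inspected; a sketch:

```bash
# AccountAccessKeysPresent is 1 when the root user has an access key.
aws iam get-account-summary \
  --query 'SummaryMap.AccountAccessKeysPresent'
```
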
diff --git a/pci_v321/docs/pci_v321_iam_2.md b/pci_v321/docs/pci_v321_iam_2.md
deleted file mode 100644
index 1d3437e0..00000000
--- a/pci_v321/docs/pci_v321_iam_2.md
+++ /dev/null
@@ -1,40 +0,0 @@
-## Description
-
-This control checks that none of your IAM users have policies attached. IAM users must inherit permissions from IAM groups or roles.
-
-It does not check whether least privileged policies are applied to IAM roles and groups.
-
-## Remediation
-
-To resolve this issue, do the following:
-
-1. Create an IAM group
-2. Assign the policy to the group
-3. Add the users to the group
-
-The policy is applied to each user in the group.
-
-**To create an IAM group**
-
-1. Open the [IAM console](https://console.aws.amazon.com/iam/).
-2. Choose **Groups** and then choose **Create New Group**.
-3. Enter a name for the group to create and then choose **Next Step**.
-4. Select each policy to assign to the group and then choose **Next Step**.
-5. The policies that you choose should include any policies currently attached directly to a user account. The next step to resolve a failed check is to add users to a group and then assign the policies to that group.
-6. Each user in the group gets assigned the policies assigned to the group.
-7. Confirm the details on the **Review** page and then choose **Create Group**.
-
-**To add users to an IAM group**
-
-1. Open the [IAM console](https://console.aws.amazon.com/iam/).
-2. Choose **Groups**.
-3. Choose **Group Actions** and then choose **Add Users to Group**.
-4. Choose the users to add to the group and then choose **Add Users**.
-
-**To remove a policy attached directly to a user**
-
-1. Open the [IAM console](https://console.aws.amazon.com/iam/).
-2. Choose **Users**.
-3. For the user to detach a policy from, in the User name column, choose the name.
-4. For each policy listed under **Attached directly**, to remove the policy from the user, choose the X on the right side of the page and then choose **Remove**.
-5. Confirm that the user can still use AWS services as expected.
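
The group-based workflow above can also be done with the AWS CLI; a sketch, assuming placeholder group, policy, and user names:

```bash
# Create a group, attach the policy to it, and move the user into it
# (Admins, the policy ARN, and alice are placeholders).
aws iam create-group --group-name Admins
aws iam attach-group-policy \
  --group-name Admins \
  --policy-arn arn:aws:iam::111122223333:policy/example-policy
aws iam add-user-to-group --group-name Admins --user-name alice

# Then detach the policy that was attached directly to the user.
aws iam detach-user-policy \
  --user-name alice \
  --policy-arn arn:aws:iam::111122223333:policy/example-policy
```
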
diff --git a/pci_v321/docs/pci_v321_iam_3.md b/pci_v321/docs/pci_v321_iam_3.md
deleted file mode 100644
index 5645cd53..00000000
--- a/pci_v321/docs/pci_v321_iam_3.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks that the default version of AWS Identity and Access Management policies (also known as customer managed policies) does not grant administrator access through a statement that has `"Effect": "Allow"` with `"Action": "*"` over `"Resource": "*"`.
-
-It only checks for the customer managed policies that you created, but does not check for full access to individual services, such as "S3:*".
-
-It does not check for inline and AWS managed policies.
-
-## Remediation
-
-1. Open the [IAM console](https://console.aws.amazon.com/iam/).
-2. Choose **Policies**.
-3. Choose the radio button next to the policy to remove.
-4. From **Policy actions**, choose **Detach**.
-5. On the **Detach policy** page, choose the radio button next to each user to detach the policy from and then choose **Detach policy**.
-6. Confirm that the user that you detached the policy from can still access AWS services and resources as expected.
diff --git a/pci_v321/docs/pci_v321_iam_4.md b/pci_v321/docs/pci_v321_iam_4.md
deleted file mode 100644
index d301d3e8..00000000
--- a/pci_v321/docs/pci_v321_iam_4.md
+++ /dev/null
@@ -1,19 +0,0 @@
-## Description
-
-This control checks whether your AWS account is enabled to use a multi-factor authentication (MFA) hardware device to sign in with root user credentials.
-
-It does not check whether you are using virtual MFA.
-
-To address PCI DSS requirement 8.3.1, you can choose between hardware MFA (this control) or virtual MFA `PCI.IAM.5` (Virtual MFA should be enabled for the root user).
-
-## Remediation
-
-To enable hardware-based MFA for the root account
-
-1. Log in to your account using the root user credentials.
-2. Choose the account name at the top right of the page and then choose **My Security Credentials**.
-3. In the warning, choose **Continue to Security Credentials**.
-4. Choose **Multi-factor authentication (MFA)**.
-5. Choose **Activate MFA**.
-6. Choose a hardware-based (not virtual) device to use for MFA and then choose **Continue**.
-7. Complete the steps to configure the device type appropriate to your selection.
diff --git a/pci_v321/docs/pci_v321_iam_5.md b/pci_v321/docs/pci_v321_iam_5.md
deleted file mode 100644
index d452a3e8..00000000
--- a/pci_v321/docs/pci_v321_iam_5.md
+++ /dev/null
@@ -1,19 +0,0 @@
-## Description
-
-This control checks whether users of your AWS account require a multi-factor authentication (MFA) device to sign in with root user credentials.
-
-It does not check whether you are using hardware MFA.
-
-To address PCI DSS requirement 8.3.1, you can choose between virtual MFA (this control) or hardware MFA `PCI.IAM.4` (Hardware MFA should be enabled for the root user).
-
-## Remediation
-
-To enable MFA for the root account
-
-1. Log in to your account using the root user credentials.
-2. Choose the account name at the top-right of the page and then choose **My Security Credentials**.
-3. In the warning, choose **Continue to Security Credentials**.
-4. Choose **Multi-factor authentication (MFA)**.
-5. Choose **Activate MFA**.
-6. Choose the type of device to use for MFA and then choose **Continue**.
-7. Complete the steps to configure the device type appropriate to your selection.
diff --git a/pci_v321/docs/pci_v321_kms.md b/pci_v321/docs/pci_v321_kms.md
deleted file mode 100644
index 2127291c..00000000
--- a/pci_v321/docs/pci_v321_kms.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS KMS resources and options.
diff --git a/pci_v321/docs/pci_v321_kms_1.md b/pci_v321/docs/pci_v321_kms_1.md
deleted file mode 100644
index 83a8e02b..00000000
--- a/pci_v321/docs/pci_v321_kms_1.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks that key rotation is enabled for each customer master key (CMK). It does not check CMKs that have imported key material.
-
-You should ensure keys that have imported material and those that are not stored in AWS KMS are rotated. AWS managed customer master keys are rotated once every 3 years.
-
-## Remediation
-
-To enable CMK rotation
-
-1. Open the [AWS KMS console](https://console.aws.amazon.com/kms).
-2. To change the AWS Region, use the Region selector in the upper-right corner of the page.
-3. Choose **Customer managed keys**.
-4. In the Alias column, choose the alias of the key to update.
-5. Choose **Key rotation**.
-6. Select **Automatically rotate this CMK every year** and then choose **Save**.
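
Rotation can also be enabled per key from the AWS CLI; a sketch with a placeholder key ID:

```bash
# Enable annual rotation for one customer managed key and verify it
# (the key ID is a placeholder).
aws kms enable-key-rotation --key-id 1234abcd-12ab-34cd-56ef-1234567890ab
aws kms get-key-rotation-status --key-id 1234abcd-12ab-34cd-56ef-1234567890ab
```
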
diff --git a/pci_v321/docs/pci_v321_lambda.md b/pci_v321/docs/pci_v321_lambda.md
deleted file mode 100644
index 30e4f252..00000000
--- a/pci_v321/docs/pci_v321_lambda.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring Lambda resources and options.
diff --git a/pci_v321/docs/pci_v321_lambda_1.md b/pci_v321/docs/pci_v321_lambda_1.md
deleted file mode 100644
index 00c6ef08..00000000
--- a/pci_v321/docs/pci_v321_lambda_1.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## Description
-
-This control checks whether the Lambda function resource-based policy prohibits public access.
-
-It does not check for access to the Lambda function by internal principals, such as IAM roles. You should ensure that access to the Lambda function is restricted to authorized principals only by using least privilege Lambda resource-based policies.
-
-For more information about using resource-based policies for AWS Lambda, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html).
-
-## Remediation
-
-To remediate this issue, update the resource-based policy to change the publicly accessible Lambda function to a private Lambda function.
-You can only update resource-based policies for Lambda resources within the scope of the AddPermission and AddLayerVersionPermission API actions.
-You cannot author policies for your Lambda resources in JSON, or use conditions that don't map to parameters for those actions using the CLI or the SDK.
-
-**To use the AWS CLI to revoke function-use permission from an AWS service or another account**
-
-1. To get the ID of the statement from the output of GetPolicy, from the AWS CLI, run the following:
-
-```bash
-aws lambda get-policy --function-name yourfunctionname
-```
-This command returns the Lambda resource-based policy string associated with the publicly accessible Lambda function.
-
-2. From the policy statement returned by the get-policy command, copy the string value of the Sid field.
-
-3. From the AWS CLI, run
-
-```bash
-aws lambda remove-permission --function-name yourfunctionname --statement-id youridvalue
-```
-
-**To use the Lambda console to restrict access to the Lambda function**
-
-1. Open the [AWS Lambda console](https://console.aws.amazon.com/lambda/).
-2. Navigate to Functions and then select your publicly accessible Lambda function.
-3. Under **Designer**, choose the key icon at the top left. It has the tool-tip View permissions.
-4. Under Function policy, if the policy allows actions for the principal element `"*"` or `{"AWS": "*"}`, it is publicly accessible.
- - Consider adding the following IAM condition to scope access to your account only.
-
- ```json
- "Condition": {
- "StringEquals": {
- "AWS:SourceAccount": ""
- }
- }
- }
- ```
diff --git a/pci_v321/docs/pci_v321_lambda_2.md b/pci_v321/docs/pci_v321_lambda_2.md
deleted file mode 100644
index 48eadc26..00000000
--- a/pci_v321/docs/pci_v321_lambda_2.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks whether a Lambda function is in a VPC.
-
-It does not evaluate the VPC subnet routing configuration to determine public reachability.
-
-Note that if Lambda@Edge is found in the account, then this control generates failed findings. To prevent these findings, you can disable this control.
-
-## Remediation
-
-To configure a function to connect to private subnets in a virtual private cloud (VPC) in your account
-
-1. Open the [AWS Lambda console](https://console.aws.amazon.com/lambda/).
-2. Navigate to `Functions` and then select your Lambda function.
-3. Scroll to **Network** and then select a **VPC** with the connectivity requirements of the function.
-4. To run your functions in high availability mode, Security Hub recommends that you choose at least two subnets.
-5. Choose at least one security group that has the connectivity requirements of the function.
-6. Choose **Save**.
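
Attaching an existing function to a VPC can also be done from the AWS CLI; a sketch with placeholder function, subnet, and security group names:

```bash
# Connect the function to two private subnets and one security group
# (my-function and the IDs are placeholders).
aws lambda update-function-configuration \
  --function-name my-function \
  --vpc-config SubnetIds=subnet-0aaa111,subnet-0bbb222,SecurityGroupIds=sg-0ccc333
```
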
diff --git a/pci_v321/docs/pci_v321_opensearch.md b/pci_v321/docs/pci_v321_opensearch.md
deleted file mode 100644
index d2a41c68..00000000
--- a/pci_v321/docs/pci_v321_opensearch.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring OpenSearch resources and options.
diff --git a/pci_v321/docs/pci_v321_opensearch_1.md b/pci_v321/docs/pci_v321_opensearch_1.md
deleted file mode 100644
index 9aa57080..00000000
--- a/pci_v321/docs/pci_v321_opensearch_1.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Description
-
-This control checks whether Amazon OpenSearch domains are in a VPC. It does not evaluate the VPC subnet routing configuration to determine public access.
-
-You should ensure that Amazon OpenSearch domains are not attached to public subnets. See [Resource-based policies](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ac.html#ac-types-resource) in the Amazon OpenSearch Service Developer Guide.
-
-You also should ensure that your VPC is configured according to the recommended best practices. See [Security best practices for your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-best-practices.html) in the Amazon VPC User Guide.
-
-Amazon OpenSearch domains deployed within a VPC can communicate with VPC resources over the private AWS network, without the need to traverse the public internet. This configuration increases the security posture by limiting access to the data in transit. VPCs provide a number of network controls to secure access to Amazon OpenSearch domains, including network ACL and security groups. Security Hub recommends that you migrate public Amazon OpenSearch domains to VPCs to take advantage of these controls.
-
-## Remediation
-
-If you create a domain with a public endpoint, you cannot later place it within a VPC. Instead, you must create a new domain and migrate your data. The reverse is also true. If you create a domain within a VPC, it cannot have a public endpoint. Instead, you must either [create another domain](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#es-createdomains) or disable this control.
-
-See [Launching your Amazon OpenSearch Service domains within a VPC](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html) in the Amazon OpenSearch Service Developer Guide.
\ No newline at end of file
diff --git a/pci_v321/docs/pci_v321_opensearch_2.md b/pci_v321/docs/pci_v321_opensearch_2.md
deleted file mode 100644
index 2fe7a25a..00000000
--- a/pci_v321/docs/pci_v321_opensearch_2.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks whether Amazon OpenSearch domains have encryption-at-rest configuration enabled. The check fails if encryption at rest is not enabled.
-
-For an added layer of security for your sensitive data in OpenSearch, you should configure your OpenSearch domain to be encrypted at rest. OpenSearch domains offer encryption of data at rest. The feature uses AWS KMS to store and manage your encryption keys. To perform the encryption, it uses the Advanced Encryption Standard algorithm with 256-bit keys (AES-256).
-
-To learn more about OpenSearch encryption at rest, see [Encryption of data at rest for Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/encryption-at-rest.html) in the Amazon OpenSearch Service Developer Guide.
-
-
-## Remediation
-
-By default, domains do not encrypt data at rest, and you cannot configure existing domains to use the feature. To enable the feature, you must create another domain and migrate your data.
-
-For information about creating domains, see [Creating and managing Amazon OpenSearch Service domains](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html) in the Amazon OpenSearch Service Developer Guide.
-
-See [Launching your Amazon OpenSearch Service domains within a VPC](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html) in the Amazon OpenSearch Service Developer Guide.
diff --git a/pci_v321/docs/pci_v321_rds.md b/pci_v321/docs/pci_v321_rds.md
deleted file mode 100644
index c7743854..00000000
--- a/pci_v321/docs/pci_v321_rds.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS RDS resources and options.
diff --git a/pci_v321/docs/pci_v321_rds_1.md b/pci_v321/docs/pci_v321_rds_1.md
deleted file mode 100644
index 7fabf1fa..00000000
--- a/pci_v321/docs/pci_v321_rds_1.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks whether Amazon RDS DB snapshots prohibit access by other accounts. You should also ensure that access to the snapshot and permission to change Amazon RDS configuration is restricted to authorized principals only.
-
-**Note** that if the configuration is changed to allow public access, the AWS Config rule may not be able to detect the change for up to 12 hours. Until the AWS Config rule detects the change, the check passes even though the configuration violates the rule.
-
-## Remediation
-
-To remove public access for Amazon RDS Databases
-
-1. Open the [Amazon RDS console](https://console.aws.amazon.com/rds/).
-2. Navigate to Snapshots and then select the public Snapshot you want to modify
-3. From the **Actions** list, choose **Share Snapshots**
-4. From **DB snapshot visibility**, choose **Private**
-5. Under **DB snapshot visibility**, select **for all**
-6. Choose **Save**
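
Public sharing of a manual snapshot can also be revoked from the AWS CLI; a sketch with a placeholder snapshot identifier:

```bash
# Remove the "all" (public) entry from the snapshot's restore attribute
# (my-db-snapshot is a placeholder).
aws rds modify-db-snapshot-attribute \
  --db-snapshot-identifier my-db-snapshot \
  --attribute-name restore \
  --values-to-remove "all"
```
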
diff --git a/pci_v321/docs/pci_v321_rds_2.md b/pci_v321/docs/pci_v321_rds_2.md
deleted file mode 100644
index 747242de..00000000
--- a/pci_v321/docs/pci_v321_rds_2.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks whether RDS instances are publicly accessible by evaluating the publiclyAccessible field in the instance configuration item. The value of publiclyAccessible indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address.
-
-The control does not check VPC subnet routing settings or the Security Group rules. You should also ensure VPC subnet routing does not allow public access, and that the security group inbound rule associated with the RDS instance does not allow unrestricted access (0.0.0.0/0). You should also ensure that access to your RDS instance configuration is limited to only authorized users by restricting users' IAM permissions to modify RDS instances settings and resources.
-
-## Remediation
-
-To remove public access for Amazon RDS Databases
-
-1. Open the [Amazon RDS console](https://console.aws.amazon.com/rds/).
-2. Navigate to Databases and then choose your public database.
-3. Choose **Modify**.
-4. Scroll to **Network & Security**.
-5. For `Public accessibility`, choose **No**.
-6. Scroll to the bottom and then choose **Continue**.
-7. Under Scheduling of modifications, choose **Apply immediately**.
-8. Choose Modify DB Instance.
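
The same change can be applied from the AWS CLI; a sketch with a placeholder instance identifier:

```bash
# Turn off public accessibility and apply the change immediately
# (my-db-instance is a placeholder).
aws rds modify-db-instance \
  --db-instance-identifier my-db-instance \
  --no-publicly-accessible \
  --apply-immediately
```
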
diff --git a/pci_v321/docs/pci_v321_redshift.md b/pci_v321/docs/pci_v321_redshift.md
deleted file mode 100644
index fca4bd19..00000000
--- a/pci_v321/docs/pci_v321_redshift.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS Redshift resources and options.
diff --git a/pci_v321/docs/pci_v321_redshift_1.md b/pci_v321/docs/pci_v321_redshift_1.md
deleted file mode 100644
index 40a2ed7c..00000000
--- a/pci_v321/docs/pci_v321_redshift_1.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Description
-
-This control checks whether Amazon Redshift clusters are publicly accessible by evaluating the publiclyAccessible field in the cluster configuration item.
-
-## Remediation
-
-1. Open the [Amazon Redshift console](https://console.aws.amazon.com/redshift/).
-2. On the navigation pane, choose **Clusters** and then select your public Amazon Redshift cluster.
-3. From the Cluster drop-down menu, choose **Modify cluster**.
-4. In `Publicly accessible`, choose **No**.
-5. Choose **Modify**.
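
A scripted equivalent with the AWS CLI; a sketch with a placeholder cluster identifier:

```bash
# Make the cluster private (my-cluster is a placeholder).
aws redshift modify-cluster \
  --cluster-identifier my-cluster \
  --no-publicly-accessible
```
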
diff --git a/pci_v321/docs/pci_v321_s3.md b/pci_v321/docs/pci_v321_s3.md
deleted file mode 100644
index b378322f..00000000
--- a/pci_v321/docs/pci_v321_s3.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS S3 resources and options.
diff --git a/pci_v321/docs/pci_v321_s3_1.md b/pci_v321/docs/pci_v321_s3_1.md
deleted file mode 100644
index d0fc0ac3..00000000
--- a/pci_v321/docs/pci_v321_s3_1.md
+++ /dev/null
@@ -1,13 +0,0 @@
-## Description
-
-This control checks whether your S3 buckets allow public write access by evaluating the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).
-
-It does not check for write access to the bucket by internal principals, such as IAM roles. You should ensure that access to the bucket is restricted to authorized principals only.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. Choose the name of the bucket identified in the finding.
-3. Choose **Permissions** and then choose **Public access settings.**
-4. Choose **Edit**, select all four options, and then choose **Save**.
-5. If prompted, enter `confirm` and then choose **Confirm**.
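
Block Public Access can also be turned on for a single bucket from the AWS CLI; a sketch with a placeholder bucket name:

```bash
# Enable all four Block Public Access settings on one bucket
# (my-bucket is a placeholder).
aws s3api put-public-access-block \
  --bucket my-bucket \
  --public-access-block-configuration \
    BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true
```
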
diff --git a/pci_v321/docs/pci_v321_s3_2.md b/pci_v321/docs/pci_v321_s3_2.md
deleted file mode 100644
index d7518c6a..00000000
--- a/pci_v321/docs/pci_v321_s3_2.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Description
-
-This control checks whether your S3 buckets allow public read access by evaluating the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).
-
-Unless you explicitly require everyone on the internet to be able to read your S3 bucket, you should ensure that your S3 bucket is not publicly readable.
-
-It does not check for read access to the bucket by internal principals, such as IAM roles. You should ensure that access to the bucket is restricted to authorized principals only.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. Choose the name of the bucket identified in the finding.
-3. Choose **Permissions** and then choose **Public access settings.**
-4. Choose **Edit**, select all four options, and then choose **Save**.
-5. If prompted, enter `confirm` and then choose **Confirm**.
diff --git a/pci_v321/docs/pci_v321_s3_3.md b/pci_v321/docs/pci_v321_s3_3.md
deleted file mode 100644
index 8f29796b..00000000
--- a/pci_v321/docs/pci_v321_s3_3.md
+++ /dev/null
@@ -1,19 +0,0 @@
-## Description
-
-This control checks whether S3 buckets have cross-region replication enabled.
-
-PCI DSS does not require data replication or highly available configurations. However, this check aligns with AWS best practices for this control.
-
-In addition to availability, you should consider other systems hardening settings.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. Choose the S3 bucket that does not have cross-region replication enabled.
-3. Choose **Management**, then choose **Replication**.
-4. Choose **Add rule**. If versioning is not already enabled, you are prompted to enable it.
-5. Choose your source bucket - `Entire bucket`.
-6. Choose your destination bucket. If versioning is not already enabled on the destination bucket for your account, you are prompted to enable it.
-7. Choose an IAM role. For more information on setting up permissions for replication, see the [Amazon Simple Storage Service Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/setting-repl-config-perm-overview.html).
-8. Enter a rule name, choose **Enabled** for the status, then choose **Next**.
-9. Choose **Save**
diff --git a/pci_v321/docs/pci_v321_s3_4.md b/pci_v321/docs/pci_v321_s3_4.md
deleted file mode 100644
index 2527f234..00000000
--- a/pci_v321/docs/pci_v321_s3_4.md
+++ /dev/null
@@ -1,18 +0,0 @@
-## Description
-
-This control checks that your Amazon S3 bucket either has Amazon S3 default encryption enabled or that the S3 bucket policy explicitly denies put-object requests without server-side encryption.
-
-When you set default encryption on a bucket, all new objects stored in the bucket are encrypted when they are stored, including clear text PAN data.
-
-Server-side encryption for all of the objects stored in a bucket can also be enforced using a bucket policy.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. Choose the bucket from the list.
-3. Choose **Properties**.
-4. Choose **Default encryption**.
-5. For the encryption, choose either `AES-256` or `AWS-KMS`.
- 1. To use keys that are managed by Amazon S3 for default encryption, choose AES-256. For more information about using Amazon S3 server-side encryption to encrypt your data, see the Amazon Simple Storage Service User Guide.
- 2. To use keys that are managed by AWS KMS for default encryption, choose AWS-KMS. Then choose a master key from the list of the AWS KMS master keys that you have created. Type the Amazon Resource Name (ARN) of the AWS KMS key to use. You can find the ARN for your AWS KMS key in the IAM console, under Encryption keys. Or, you can choose a key name from the drop-down list.
-6. Choose **Save**.
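
Default encryption can also be set from the AWS CLI; a sketch using SSE-S3 (AES-256) with a placeholder bucket name:

```bash
# Enable default SSE-S3 encryption for new objects (my-bucket is a placeholder).
aws s3api put-bucket-encryption \
  --bucket my-bucket \
  --server-side-encryption-configuration \
    '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
```
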
diff --git a/pci_v321/docs/pci_v321_s3_5.md b/pci_v321/docs/pci_v321_s3_5.md
deleted file mode 100644
index e0c82faf..00000000
--- a/pci_v321/docs/pci_v321_s3_5.md
+++ /dev/null
@@ -1,40 +0,0 @@
-## Description
-
-This control checks whether Amazon S3 buckets have policies that require requests to use Secure Socket Layer (SSL).
-
-S3 buckets should have policies that require all requests (`"Action": "s3:*"`) to only accept transmission of data over HTTPS in the S3 resource policy, indicated by the condition key `aws:SecureTransport`.
-
-This does not check the SSL or TLS version. You should not allow early versions of SSL or TLS (SSLv3, TLS1.0) per PCI DSS requirements.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. Navigate to the noncompliant bucket, and then choose the bucket name.
-3. Choose **Permissions**, then choose **Bucket Policy**.
-4. Add a similar policy statement to that in the policy below. Replace `awsexamplebucket` with the name of the bucket you are modifying.
-
-```json
-{
- "Id": "ExamplePolicy",
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "AllowSSLRequestsOnly",
- "Action": "s3:*",
- "Effect": "Deny",
- "Resource": [
- "arn:aws:s3:::awsexamplebucket",
- "arn:aws:s3:::awsexamplebucket/*"
- ],
- "Condition": {
- "Bool": {
- "aws:SecureTransport": "false"
- }
- },
- "Principal": "*"
- }
- ]
-}
-```
-
-5. Choose **Save**.
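
Once the statement above is saved to a local file, it can be applied from the AWS CLI; a sketch with a placeholder bucket name and file path:

```bash
# Apply the SSL-only policy stored in policy.json (both names are placeholders).
aws s3api put-bucket-policy \
  --bucket awsexamplebucket \
  --policy file://policy.json
```
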
diff --git a/pci_v321/docs/pci_v321_s3_6.md b/pci_v321/docs/pci_v321_s3_6.md
deleted file mode 100644
index 87c8487d..00000000
--- a/pci_v321/docs/pci_v321_s3_6.md
+++ /dev/null
@@ -1,21 +0,0 @@
-## Description
-
-This control checks whether the following public access block settings are configured at the account level.
-
-- `ignorePublicAcls`: true
-- `blockPublicPolicy`: true
-- `blockPublicAcls`: true
-- `restrictPublicBuckets`: true
-
-The control passes if all of the public access block settings are set to true.
-
-The control fails if any of the settings are set to false, or if any of the settings are not configured. When the settings do not have a value, the AWS Config rule cannot complete its evaluation.
-
-As an AWS best practice, S3 buckets should block public access. Unless you explicitly require everyone on the internet to be able to access your S3 bucket, you should ensure that your S3 bucket is not publicly accessible.
-
-## Remediation
-
-1. Open the [Amazon S3 console](https://console.aws.amazon.com/s3/).
-2. In the navigation pane, choose **Block public access** (account settings).
-3. Choose **Edit**. Then select `Block all public access`.
-4. Choose **Save** changes
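
The account-level settings can also be set from the AWS CLI; a sketch with a placeholder account ID:

```bash
# Enable all four account-level Block Public Access settings
# (111122223333 is a placeholder account ID).
aws s3control put-public-access-block \
  --account-id 111122223333 \
  --public-access-block-configuration \
    BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true
```
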
diff --git a/pci_v321/docs/pci_v321_sagemaker.md b/pci_v321/docs/pci_v321_sagemaker.md
deleted file mode 100644
index 68dac1fd..00000000
--- a/pci_v321/docs/pci_v321_sagemaker.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS Sagemaker resources and options.
diff --git a/pci_v321/docs/pci_v321_sagemaker_1.md b/pci_v321/docs/pci_v321_sagemaker_1.md
deleted file mode 100644
index 2347c561..00000000
--- a/pci_v321/docs/pci_v321_sagemaker_1.md
+++ /dev/null
@@ -1,23 +0,0 @@
-## Description
-
-This control checks whether direct internet access is disabled for a SageMaker notebook instance. To do this, it checks whether the `DirectInternetAccess` field is disabled for the notebook instance.
-
-If you configure your SageMaker instance without a VPC, then by default direct internet access is enabled on your instance. You should configure your instance with a VPC and change the default setting to `Disable — Access the internet through a VPC`.
-
-To train or host models from a notebook, you need internet access. To enable internet access, make sure that your VPC has a NAT gateway and your security group allows outbound connections. To learn more about how to connect a notebook instance to resources in a VPC, see [Connect a notebook instance to resources in a VPC](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-notebook-and-internet-access.html) in the Amazon SageMaker Developer Guide.
-
-You should also ensure that access to your SageMaker configuration is limited to only authorized users. Restrict users' IAM permissions to modify SageMaker settings and resources.
-
-## Remediation
-
-Note that you cannot change the internet access setting after a notebook instance is created. It must be stopped, deleted, and recreated.
-
-To configure a SageMaker notebook instance to deny direct internet access
-
-1. Open the [SageMaker console](https://console.aws.amazon.com/sagemaker/)
-2. Navigate to **Notebook instances**.
-3. Delete the instance that has direct internet access enabled. Choose the instance, choose **Actions**, then choose **Stop**.
-4. After the instance is stopped, choose **Actions**, then choose **Delete**.
-5. Choose **Create notebook instance**. Provide the configuration details.
-6. Expand the **Network** section. Then choose a VPC, subnet, and security group. Under **Direct internet access**, choose **Disable — Access the internet through a VPC**.
-7. Choose **Create notebook instance**.
diff --git a/pci_v321/docs/pci_v321_ssm.md b/pci_v321/docs/pci_v321_ssm.md
deleted file mode 100644
index 8cfe4ca2..00000000
--- a/pci_v321/docs/pci_v321_ssm.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Overview
-
-This section contains recommendations for configuring AWS SSM resources and options.
diff --git a/pci_v321/docs/pci_v321_ssm_1.md b/pci_v321/docs/pci_v321_ssm_1.md
deleted file mode 100644
index 48a8431f..00000000
--- a/pci_v321/docs/pci_v321_ssm_1.md
+++ /dev/null
@@ -1,21 +0,0 @@
-## Description
-
-This control checks whether the compliance status of the Amazon EC2 Systems Manager patch compliance is COMPLIANT or NON_COMPLIANT after the patch installation on the instance.
-
-It only checks instances that are managed by AWS Systems Manager Patch Manager.
-
-It does not check whether the patch was applied within the 30-day limit prescribed by PCI DSS requirement 6.2.
-
-It also does not validate whether the patches applied were classified as security patches.
-
-## Remediation
-
-This rule checks whether the compliance status of the Amazon EC2 Systems Manager patch compliance is COMPLIANT or NON_COMPLIANT. To find out more about patch compliance states, see the [AWS Systems Manager User Guide](https://docs.aws.amazon.com/systems-manager/latest/userguide/about-patch-compliance-states.html).
-
-1. Open the [AWS Systems Manager console](https://console.aws.amazon.com/systems-manager/).
-2. In the navigation pane, under **Instances & Nodes**, choose **Run Command**.
-3. Choose **Run command**.
-4. Choose the radio button next to **AWS-RunPatchBaseline** and then change the **Operation** to **Install**.
-5. Choose **Choose instances manually** and then choose the noncompliant instance(s).
-6. Scroll to the bottom and then choose **Run**.
-7. After the command has completed, to monitor the new compliance status of your patched instances, in the navigation pane, choose Compliance.
diff --git a/pci_v321/docs/pci_v321_ssm_2.md b/pci_v321/docs/pci_v321_ssm_2.md
deleted file mode 100644
index c4ab4176..00000000
--- a/pci_v321/docs/pci_v321_ssm_2.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## Description
-
-This control checks whether the status of the AWS Systems Manager association compliance is COMPLIANT or NON_COMPLIANT after the association is run on an instance. The control passes if the association compliance status is COMPLIANT.
-
-A State Manager association is a configuration that is assigned to your managed instances. The configuration defines the state that you want to maintain on your instances. For example, an association can specify that antivirus software must be installed and running on your instances, or that certain ports must be closed.
-
-After you create one or more State Manager associations, compliance status information is immediately available to you in the console or in response to AWS CLI commands or corresponding Systems Manager API operations. For associations, Configuration Compliance shows statuses of Compliant or Non-compliant and the severity level assigned to the association, such as Critical or Medium. To learn more about State Manager association compliance, see About State Manager association compliance in the AWS Systems Manager User Guide.
-
-You must configure your in-scope EC2 instances for Systems Manager association. You must also configure the patch baseline for the security rating of the vendor of patches, and set the autoapproval date to meet PCI DSS 3.2.1 requirement 6.2. For additional guidance on how to create an association, see [Create an association](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-state-assoc.html) in the AWS Systems Manager User Guide. For additional information on working with patching in Systems Manager, see [AWS Systems Manager Patch Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-patch.html) in the AWS Systems Manager User Guide.
-
-## Remediation
-
-A failed association can be related to different things, including targets and SSM document names. To remediate this issue, you must first identify and investigate the association. You can then update the association to correct the specific issue.
-
-You can edit an association to specify a new name, schedule, severity level, or targets. After you edit an association, Systems Manager creates a new version.
-
-To investigate and update a failed association
-
-1. Open the [AWS Systems Manager console](https://console.aws.amazon.com/systems-manager/).
-2. In the navigation pane, under **Instances & Nodes**, choose **Managed Instances**.
-3. Choose the instance ID that has an **Association status** of **Failed**.
-4. Choose **View details**.
-5. Choose **Associations**.
-6. Note the name of the association that has an **Association status** of **Failed**. This is the association that you need to investigate. You need to use the association name in the next step.
-7. In the navigation pane, choose **State Manager**. Search for the association name, then select the association.
-8. After you determine the issue, edit the failed association to correct the problem. For information on how to edit an association, see [Edit an association](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-state-assoc-edit.html).
diff --git a/pci_v321/docs/pci_v321_ssm_3.md b/pci_v321/docs/pci_v321_ssm_3.md
deleted file mode 100644
index 09173e88..00000000
--- a/pci_v321/docs/pci_v321_ssm_3.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Description
-
-This control checks whether the EC2 instances in your account are managed by Systems Manager.
-
-AWS Systems Manager is an AWS service that you can use to view and control your AWS infrastructure. To help you to maintain security and compliance, Systems Manager scans your managed instances. A managed instance is a machine that is configured for use with Systems Manager. Systems Manager then reports or takes corrective action on any policy violations that it detects. Systems Manager also helps you to configure and maintain your managed instances. Additional configuration is needed in Systems Manager for patch deployment to managed EC2 instances.
-
-## Remediation
-
-You can use the Systems Manager quick setup to set up Systems Manager to manage your EC2 instances.
-
-To determine whether your instances can support Systems Manager associations, see [Systems Manager prerequisites](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-prereqs.html) in the AWS Systems Manager User Guide.
-
-1. Open the [AWS Systems Manager console](https://console.aws.amazon.com/systems-manager/).
-2. In the navigation pane, choose **Quick setup**.
-3. On the configuration screen, keep the default options.
-4. Choose **Set up Systems Manager**.
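
To see which EC2 instances are currently reporting to Systems Manager (and therefore pass this control), something like the following should work:

```bash
# List managed instances and their ping status.
aws ssm describe-instance-information \
  --query 'InstanceInformationList[].{Id:InstanceId,Ping:PingStatus,Platform:PlatformName}'
```
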
diff --git a/pci_v321/ec2.sp b/pci_v321/ec2.sp
deleted file mode 100644
index 3317e20e..00000000
--- a/pci_v321/ec2.sp
+++ /dev/null
@@ -1,100 +0,0 @@
-locals {
- pci_v321_ec2_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/EC2"
- })
-}
-
-benchmark "pci_v321_ec2" {
- title = "EC2"
- documentation = file("./pci_v321/docs/pci_v321_ec2.md")
- children = [
- control.pci_v321_ec2_1,
- control.pci_v321_ec2_2,
- control.pci_v321_ec2_3,
- control.pci_v321_ec2_4,
- control.pci_v321_ec2_5,
- control.pci_v321_ec2_6,
- ]
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_ec2_1" {
- title = "1 Amazon EBS snapshots should not be publicly restorable"
- description = "This control checks whether Amazon Elastic Block Store snapshots are not publicly restorable by everyone, which makes them public. Amazon EBS snapshots should not be publicly restorable by everyone unless you explicitly allow it, to avoid accidental exposure of your company’s sensitive data."
- severity = "critical"
- query = query.ebs_snapshot_not_publicly_restorable
- documentation = file("./pci_v321/docs/pci_v321_ec2_1.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_1"
- pci_requirements = "1.2.1,1.3.1,1.3.4,7.2.1"
- })
-}
-
-control "pci_v321_ec2_2" {
- title = "2 VPC default security group should prohibit inbound and outbound traffic"
- description = "This control checks that the default security group of a VPC does not allow inbound or outbound traffic. It does not check for access restrictions for other security groups that are not default, and other VPC configurations."
- severity = "medium"
- query = query.vpc_default_security_group_restricts_all_traffic
- documentation = file("./pci_v321/docs/pci_v321_ec2_2.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_2"
- pci_requirements = "1.2.1,1.3.4,2.1"
- })
-}
-
-control "pci_v321_ec2_3" {
- title = "3 Unused EC2 security groups should be removed"
- description = "This control helps you maintain an accurate asset inventory of needed security groups in your cardholder data environment (CDE). It does so by checking that security groups are attached to Amazon EC2 instances or to an ENI. A failed finding indicates you may have unused Amazon EC2 security groups."
- severity = "low"
- query = query.vpc_security_group_associated
- documentation = file("./pci_v321/docs/pci_v321_ec2_3.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_3"
- pci_requirements = "2.4"
- })
-}
-
-control "pci_v321_ec2_4" {
- title = "4 Unused EC2 EIPs should be removed"
- description = "This control checks whether Elastic IP addresses that are allocated to a VPC are attached to Amazon EC2 instances or in-use elastic network interfaces (ENIs). A failed finding indicates you may have unused Amazon EC2 EIPs. This will help you maintain an accurate asset inventory of EIPs in your cardholder data environment (CDE)."
- severity = "low"
- query = query.vpc_eip_associated
- documentation = file("./pci_v321/docs/pci_v321_ec2_4.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_4"
- pci_requirements = "2.4"
- })
-}
-
-control "pci_v321_ec2_5" {
- title = "5 Security groups should not allow ingress from 0.0.0.0/0 to port 22"
- description = "This control checks whether security groups in use disallow unrestricted incoming SSH traffic. It does not evaluate outbound traffic. Note that security groups are stateful. If you send a request from your instance, the response traffic for that request is allowed to flow in regardless of inbound security group rules. Responses to allowed inbound traffic are allowed to flow out regardless of outbound rules."
- severity = "high"
- query = query.vpc_security_group_remote_administration
- documentation = file("./pci_v321/docs/pci_v321_ec2_5.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_5"
- pci_requirements = "1.2.1,1.3.1,2.2.2"
- })
-}
-
-control "pci_v321_ec2_6" {
- title = "6 VPC flow logging should be enabled in all VPCs"
- description = "This control checks whether VPC flow logs are found and enabled for VPCs. The traffic type is set to REJECT. With VPC Flow Logs, you can capture information about the IP address traffic to and from network interfaces in your VPC. After you create a flow log, you can use CloudWatch Logs to view and retrieve the log data. Security Hub recommends that you enable flow logging for packet rejects for VPCs. Flow logs provide visibility into network traffic that traverses the VPC. They can detect anomalous traffic and provide insight into security workflows. By default, the record includes values for the different components of the IP address flow, including the source, destination, and protocol."
- severity = "medium"
- query = query.vpc_flow_logs_enabled
- documentation = file("./pci_v321/docs/pci_v321_ec2_6.md")
-
- tags = merge(local.pci_v321_ec2_common_tags, {
- pci_item_id = "ec2_6"
- pci_requirements = "10.3.3,10.3.4,10.3.5,10.3.6"
- })
-}
diff --git a/pci_v321/elbv2.sp b/pci_v321/elbv2.sp
deleted file mode 100644
index 8d956d1b..00000000
--- a/pci_v321/elbv2.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_elbv2_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/ELBv2"
- })
-}
-
-benchmark "pci_v321_elbv2" {
- title = "ELBV2"
- documentation = file("./pci_v321/docs/pci_v321_elbv2.md")
- children = [
- control.pci_v321_elbv2_1
- ]
-
- tags = merge(local.pci_v321_elbv2_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_elbv2_1" {
- title = "1 Application Load Balancer should be configured to redirect all HTTP requests to HTTPS"
- description = "This control checks whether HTTP to HTTPS redirection is configured on all HTTP listeners of Application Load Balancers. The control fails if any of the HTTP listeners of Application Load Balancers do not have HTTP to HTTPS redirection configured."
- severity = "medium"
- query = query.elb_application_lb_redirect_http_request_to_https
- documentation = file("./pci_v321/docs/pci_v321_elbv2_1.md")
-
- tags = merge(local.pci_v321_elbv2_common_tags, {
- pci_item_id = "elbv2_1"
- pci_requirements = "2.3,4.1"
- })
-}
diff --git a/pci_v321/es.sp b/pci_v321/es.sp
deleted file mode 100644
index 16eaf132..00000000
--- a/pci_v321/es.sp
+++ /dev/null
@@ -1,44 +0,0 @@
-locals {
- pci_v321_es_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/ES"
- })
-}
-
-benchmark "pci_v321_es" {
- title = "Elasticsearch"
- documentation = file("./pci_v321/docs/pci_v321_es.md")
- children = [
- control.pci_v321_es_1,
- control.pci_v321_es_2,
- ]
-
- tags = merge(local.pci_v321_es_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_es_1" {
- title = "1 Amazon Elasticsearch Service domains should be in a VPC"
- description = "This control checks whether Amazon Elasticsearch Service domains are in a VPC. It does not evaluate the VPC subnet routing configuration to determine public reachability."
- severity = "critical"
- query = query.es_domain_in_vpc
- documentation = file("./pci_v321/docs/pci_v321_es_1.md")
-
- tags = merge(local.pci_v321_es_common_tags, {
- pci_item_id = "es_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
-
-control "pci_v321_es_2" {
- title = "2 Amazon Elasticsearch Service domains should have encryption at rest enabled"
- description = "This control checks whether Amazon ES domains have encryption at rest configuration enabled."
- severity = "medium"
- query = query.es_domain_encryption_at_rest_enabled
- documentation = file("./pci_v321/docs/pci_v321_es_2.md")
-
- tags = merge(local.pci_v321_es_common_tags, {
- pci_item_id = "es_2"
- pci_requirements = "3.4"
- })
-}
diff --git a/pci_v321/guardduty.sp b/pci_v321/guardduty.sp
deleted file mode 100644
index 569e0dad..00000000
--- a/pci_v321/guardduty.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_guardduty_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/GuardDuty"
- })
-}
-
-benchmark "pci_v321_guardduty" {
- title = "GuardDuty"
- documentation = file("./pci_v321/docs/pci_v321_guardduty.md")
- children = [
- control.pci_v321_guardduty_1
- ]
-
- tags = merge(local.pci_v321_guardduty_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_guardduty_1" {
- title = "1 GuardDuty should be enabled"
- description = "This control checks whether Amazon GuardDuty is enabled in your AWS account and Region."
- severity = "high"
- query = query.guardduty_enabled
- documentation = file("./pci_v321/docs/pci_v321_guardduty_1.md")
-
- tags = merge(local.pci_v321_guardduty_common_tags, {
- pci_item_id = "guardduty_1"
- pci_requirements = "11.4"
- })
-}
diff --git a/pci_v321/iam.sp b/pci_v321/iam.sp
deleted file mode 100644
index 65ef561a..00000000
--- a/pci_v321/iam.sp
+++ /dev/null
@@ -1,128 +0,0 @@
-locals {
- pci_v321_iam_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/IAM"
- })
-}
-
-benchmark "pci_v321_iam" {
- title = "IAM"
- documentation = file("./pci_v321/docs/pci_v321_iam.md")
- children = [
- control.pci_v321_iam_1,
- control.pci_v321_iam_2,
- control.pci_v321_iam_3,
- control.pci_v321_iam_4,
- control.pci_v321_iam_5,
- control.pci_v321_iam_6,
- control.pci_v321_iam_7,
- control.pci_v321_iam_8,
- ]
-
- tags = merge(local.pci_v321_iam_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_iam_1" {
- title = "1 IAM root user access key should not exist"
- description = "This control checks whether user access keys exist for the root user."
- severity = "critical"
- query = query.iam_root_user_no_access_keys
- documentation = file("./pci_v321/docs/pci_v321_iam_1.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_1"
- pci_requirements = "2.1,2.2,7.2.1"
- })
-}
-
-control "pci_v321_iam_2" {
- title = "2 IAM users should not have IAM policies attached"
- description = "This control checks that none of your IAM users have policies attached. IAM users must inherit permissions from IAM groups or roles. It does not check whether least privileged policies are applied to IAM roles and groups."
- severity = "low"
- query = query.iam_user_no_inline_attached_policies
- documentation = file("./pci_v321/docs/pci_v321_iam_2.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_2"
- pci_requirements = "7.2.1"
- })
-}
-
-control "pci_v321_iam_3" {
- title = "3 IAM policies should not allow full '*' administrative privileges"
- description = "This control checks whether the default version of AWS Identity and Access Management policies (also known as customer managed policies) do not have administrator access with a statement that has 'Effect': 'Allow' with 'Action': '*' over 'Resource': '*'."
- severity = "high"
- query = query.iam_policy_custom_no_star_star
- documentation = file("./pci_v321/docs/pci_v321_iam_3.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_3"
- pci_requirements = "7.2.1"
- })
-}
-
-control "pci_v321_iam_4" {
- title = "4 Hardware MFA should be enabled for the root user"
- description = "This control checks whether your AWS account is enabled to use multi-factor authentication (MFA) hardware device to sign in with root user credentials. It does not check whether you are using virtual MFA."
- severity = "critical"
- query = query.iam_root_user_hardware_mfa_enabled
- documentation = file("./pci_v321/docs/pci_v321_iam_4.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_4"
- pci_requirements = "8.3.1"
- })
-}
-
-control "pci_v321_iam_5" {
- title = "5 Virtual MFA should be enabled for the root user"
- description = "This control checks whether users of your AWS account require a multi-factor authentication (MFA) device to sign in with root user credentials. It does not check whether you are using hardware MFA."
- severity = "critical"
- query = query.iam_root_user_virtual_mfa
- #documentation = file("./pci_v321/docs/pci_v321_iam_5.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_5"
- pci_requirements = "8.3.1"
- })
-}
-
-control "pci_v321_iam_6" {
- title = "6 MFA should be enabled for all IAM users"
- description = "This control checks whether the IAM users have multi-factor authentication (MFA) enabled."
- severity = "medium"
- query = query.iam_user_mfa_enabled
- #documentation = file("./pci_v321/docs/pci_v321_iam_6.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_6"
- pci_requirements = "8.3.1"
- })
-}
-
-control "pci_v321_iam_7" {
- title = "7 IAM user credentials should be disabled if not used within a predefined number of days"
- description = "This control checks whether your IAM users have passwords or active access keys that have not been used within a specified number of days. The default is 90 days."
- severity = "medium"
- query = query.iam_user_unused_credentials_90
- #documentation = file("./pci_v321/docs/pci_v321_iam_7.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_7"
- pci_requirements = "8.1.4"
- })
-}
-
-control "pci_v321_iam_8" {
- title = "8 Password policies for IAM users should have strong configurations"
- description = "This control checks whether the account password policy for IAM users uses the following minimum PCI DSS configurations."
- severity = "medium"
- query = query.iam_account_password_policy_strong
- #documentation = file("./pci_v321/docs/pci_v321_iam_8.md")
-
- tags = merge(local.pci_v321_iam_common_tags, {
- pci_item_id = "iam_8"
- pci_requirements = "8.1.4,8.2.3,8.2.4,8.2.5"
- })
-}
diff --git a/pci_v321/kms.sp b/pci_v321/kms.sp
deleted file mode 100644
index 0675cb7e..00000000
--- a/pci_v321/kms.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_kms_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/KMS"
- })
-}
-
-benchmark "pci_v321_kms" {
- title = "KMS"
- documentation = file("./pci_v321/docs/pci_v321_kms.md")
- children = [
- control.pci_v321_kms_1
- ]
-
- tags = merge(local.pci_v321_kms_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_kms_1" {
- title = "1 Customer master key (CMK) rotation should be enabled"
- description = "This control checks that key rotation is enabled for each customer master key (CMK). It does not check CMKs that have imported key material. You should ensure keys that have imported material and those that are not stored in AWS KMS are rotated. AWS managed customer master keys are rotated once every 3 years."
- severity = "medium"
- query = query.kms_cmk_rotation_enabled
- documentation = file("./pci_v321/docs/pci_v321_kms_1.md")
-
- tags = merge(local.pci_v321_kms_common_tags, {
- pci_item_id = "kms_1"
- pci_requirements = "3.6.4"
- })
-}
diff --git a/pci_v321/lambda.sp b/pci_v321/lambda.sp
deleted file mode 100644
index bc3a806a..00000000
--- a/pci_v321/lambda.sp
+++ /dev/null
@@ -1,44 +0,0 @@
-locals {
- pci_v321_lambda_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/Lambda"
- })
-}
-
-benchmark "pci_v321_lambda" {
- title = "Lambda"
- documentation = file("./pci_v321/docs/pci_v321_lambda.md")
- children = [
- control.pci_v321_lambda_1,
- control.pci_v321_lambda_2
- ]
-
- tags = merge(local.pci_v321_lambda_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_lambda_1" {
- title = "1 Lambda functions should prohibit public access"
- description = "This control checks whether the Lambda function resource-based policy prohibits public access. It does not check for access to the Lambda function by internal principals, such as IAM roles. You should ensure that access to the Lambda function is restricted to authorized principals only by using least privilege Lambda resource-based policies."
- severity = "critical"
- query = query.lambda_function_restrict_public_access
- documentation = file("./pci_v321/docs/pci_v321_lambda_1.md")
-
- tags = merge(local.pci_v321_lambda_common_tags, {
- pci_item_id = "lambda_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,7.2.1"
- })
-}
-
-control "pci_v321_lambda_2" {
- title = "2 Lambda functions should be in a VPC"
- description = "This control checks whether a Lambda function is in a VPC. It does not evaluate the VPC subnet routing configuration to determine public reachability."
- severity = "critical"
- query = query.lambda_function_in_vpc
- documentation = file("./pci_v321/docs/pci_v321_lambda_2.md")
-
- tags = merge(local.pci_v321_lambda_common_tags, {
-    pci_item_id      = "lambda_2"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4"
- })
-}
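
For reference, `query.lambda_function_in_vpc` is defined elsewhere in the mod. A hypothetical sketch of the VPC membership check, assuming the Steampipe `aws_lambda_function` table exposes a `vpc_id` column:

```sql
-- Hypothetical sketch: a function with no VPC configuration has a null vpc_id.
select
  arn,
  case
    when vpc_id is not null and vpc_id <> '' then 'ok'
    else 'alarm'
  end as status
from
  aws_lambda_function;
```
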
diff --git a/pci_v321/opensearch.sp b/pci_v321/opensearch.sp
deleted file mode 100644
index d79b6391..00000000
--- a/pci_v321/opensearch.sp
+++ /dev/null
@@ -1,44 +0,0 @@
-locals {
- pci_v321_opensearch_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/OpenSearch"
- })
-}
-
-benchmark "pci_v321_opensearch" {
- title = "OpenSearch"
- documentation = file("./pci_v321/docs/pci_v321_opensearch.md")
- children = [
- control.pci_v321_opensearch_1,
- control.pci_v321_opensearch_2
- ]
-
- tags = merge(local.pci_v321_opensearch_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_opensearch_1" {
- title = "1 Amazon OpenSearch domains should be in a VPC"
- description = "This control checks whether Amazon OpenSearch domains are in a VPC. It does not evaluate the VPC subnet routing configuration to determine public access."
- severity = "medium"
- query = query.opensearch_domain_in_vpc
- documentation = file("./pci_v321/docs/pci_v321_opensearch_1.md")
-
- tags = merge(local.pci_v321_opensearch_common_tags, {
- pci_item_id = "opensearch_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
-
-control "pci_v321_opensearch_2" {
- title = "2 OpenSearch domains should have encryption at rest enabled"
- description = "This control checks whether Amazon OpenSearch domains have encryption-at-rest configuration enabled. The check fails if encryption at rest is not enabled."
- severity = "medium"
- query = query.opensearch_domain_encryption_at_rest_enabled
- documentation = file("./pci_v321/docs/pci_v321_opensearch_2.md")
-
- tags = merge(local.pci_v321_opensearch_common_tags, {
- pci_item_id = "opensearch_2"
- pci_requirements = "3.4"
- })
-}
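
As a hypothetical sketch of the encryption-at-rest check backing `query.opensearch_domain_encryption_at_rest_enabled` (the `aws_opensearch_domain` table and its `encryption_at_rest_options` column are assumptions about the Steampipe AWS plugin schema):

```sql
-- Hypothetical sketch; encryption_at_rest_options is assumed to be a JSONB
-- column containing an "Enabled" flag.
select
  arn,
  case
    when (encryption_at_rest_options ->> 'Enabled')::bool then 'ok'
    else 'alarm'
  end as status
from
  aws_opensearch_domain;
```
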
diff --git a/pci_v321/pci.sp b/pci_v321/pci.sp
deleted file mode 100644
index 2dc0b332..00000000
--- a/pci_v321/pci.sp
+++ /dev/null
@@ -1,37 +0,0 @@
-locals {
- pci_v321_common_tags = merge(local.aws_compliance_common_tags, {
- pci = "true"
- pci_version = "v3.2.1"
- })
-}
-
-benchmark "pci_v321" {
- title = "PCI v3.2.1"
-  description   = "The Payment Card Industry Data Security Standard (PCI DSS) benchmark in Security Hub consists of a set of AWS security best practice controls. Each control applies to a specific AWS resource and relates to one or more PCI DSS version 3.2.1 requirements. A PCI DSS requirement can be related to multiple controls."
- documentation = file("./pci_v321/docs/pci_overview.md")
- children = [
- benchmark.pci_v321_autoscaling,
- benchmark.pci_v321_cloudtrail,
- benchmark.pci_v321_codebuild,
- benchmark.pci_v321_config,
- benchmark.pci_v321_cw,
- benchmark.pci_v321_dms,
- benchmark.pci_v321_ec2,
- benchmark.pci_v321_elbv2,
- benchmark.pci_v321_es,
- benchmark.pci_v321_guardduty,
- benchmark.pci_v321_iam,
- benchmark.pci_v321_kms,
- benchmark.pci_v321_lambda,
- benchmark.pci_v321_opensearch,
- benchmark.pci_v321_rds,
- benchmark.pci_v321_redshift,
- benchmark.pci_v321_s3,
- benchmark.pci_v321_sagemaker,
- benchmark.pci_v321_ssm
- ]
-
- tags = merge(local.pci_v321_common_tags, {
- type = "Benchmark"
- })
-}
diff --git a/pci_v321/rds.sp b/pci_v321/rds.sp
deleted file mode 100644
index c60ca49d..00000000
--- a/pci_v321/rds.sp
+++ /dev/null
@@ -1,44 +0,0 @@
-locals {
- pci_v321_rds_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/RDS"
- })
-}
-
-benchmark "pci_v321_rds" {
- title = "RDS"
- documentation = file("./pci_v321/docs/pci_v321_rds.md")
- children = [
- control.pci_v321_rds_1,
- control.pci_v321_rds_2,
- ]
-
- tags = merge(local.pci_v321_rds_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_rds_1" {
- title = "1 RDS snapshots should prohibit public access"
- description = "This control checks whether Amazon RDS DB snapshots prohibit access by other accounts. You should also ensure that access to the snapshot and permission to change Amazon RDS configuration is restricted to authorized principals only."
- severity = "critical"
- query = query.rds_db_snapshot_prohibit_public_access
- documentation = file("./pci_v321/docs/pci_v321_rds_1.md")
-
- tags = merge(local.pci_v321_rds_common_tags, {
- pci_item_id = "rds_1"
- pci_requirements = "1.2.1,1.3.1,1.3.4,1.3.6,7.2.1"
- })
-}
-
-control "pci_v321_rds_2" {
- title = "2 RDS DB Instances should prohibit public access"
-  description   = "This control checks whether RDS instances are publicly accessible by evaluating the publiclyAccessible field in the instance configuration item. The value of publiclyAccessible indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address."
- severity = "critical"
- query = query.rds_db_instance_prohibit_public_access
- documentation = file("./pci_v321/docs/pci_v321_rds_2.md")
-
- tags = merge(local.pci_v321_rds_common_tags, {
- pci_item_id = "rds_2"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6,7.2.1"
- })
-}
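
The referenced `query.rds_db_instance_prohibit_public_access` is defined in the shared query library. A hypothetical sketch of the check, assuming the Steampipe `aws_rds_db_instance` table exposes a boolean `publicly_accessible` column:

```sql
-- Hypothetical sketch: list the DB instances that would fail the check.
select
  arn,
  title,
  region,
  account_id
from
  aws_rds_db_instance
where
  publicly_accessible;
```
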
diff --git a/pci_v321/redshift.sp b/pci_v321/redshift.sp
deleted file mode 100644
index 6c3f99ed..00000000
--- a/pci_v321/redshift.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_redshift_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/Redshift"
- })
-}
-
-benchmark "pci_v321_redshift" {
- title = "Redshift"
- documentation = file("./pci_v321/docs/pci_v321_redshift.md")
- children = [
- control.pci_v321_redshift_1
- ]
-
- tags = merge(local.pci_v321_redshift_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_redshift_1" {
- title = "1 Amazon Redshift clusters should prohibit public access"
- description = "This control checks whether Amazon Redshift clusters are publicly accessible by evaluating the publiclyAccessible field in the cluster configuration item."
- severity = "critical"
- query = query.redshift_cluster_prohibit_public_access
- documentation = file("./pci_v321/docs/pci_v321_redshift_1.md")
-
- tags = merge(local.pci_v321_redshift_common_tags, {
- pci_item_id = "redshift_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
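
A hypothetical sketch of the same publicly-accessible pattern for Redshift, written in the resource/status/reason shape the mod's control queries use (the `aws_redshift_cluster` table and its `publicly_accessible` column are assumptions):

```sql
-- Hypothetical sketch in the control-query style used elsewhere in the mod.
select
  arn as resource,
  case
    when publicly_accessible then 'alarm'
    else 'ok'
  end as status,
  case
    when publicly_accessible then title || ' publicly accessible.'
    else title || ' not publicly accessible.'
  end as reason,
  region,
  account_id
from
  aws_redshift_cluster;
```
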
diff --git a/pci_v321/s3.sp b/pci_v321/s3.sp
deleted file mode 100644
index 82051d43..00000000
--- a/pci_v321/s3.sp
+++ /dev/null
@@ -1,101 +0,0 @@
-locals {
- pci_v321_s3_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/S3"
- })
-}
-
-benchmark "pci_v321_s3" {
- title = "S3"
- documentation = file("./pci_v321/docs/pci_v321_s3.md")
- children = [
- control.pci_v321_s3_1,
- control.pci_v321_s3_2,
- control.pci_v321_s3_3,
- control.pci_v321_s3_4,
- control.pci_v321_s3_5,
- control.pci_v321_s3_6,
- ]
-
- tags = merge(local.pci_v321_s3_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_s3_1" {
- title = "1 S3 buckets should prohibit public write access"
- description = "This control checks whether your S3 buckets allow public write access by evaluating the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). It does not check for write access to the bucket by internal principals, such as IAM roles. You should ensure that access to the bucket is restricted to authorized principals only."
- severity = "critical"
- query = query.s3_bucket_restrict_public_write_access
- documentation = file("./pci_v321/docs/pci_v321_s3_1.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6,7.2.1"
- })
-}
-
-control "pci_v321_s3_2" {
- title = "2 S3 buckets should prohibit public read access"
-  description   = "This control checks whether your S3 buckets allow public read access by evaluating the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). Unless you explicitly require everyone on the internet to be able to read from your S3 bucket, you should ensure that your S3 bucket is not publicly readable. It does not check for read access to the bucket by internal principals, such as IAM roles. You should ensure that access to the bucket is restricted to authorized principals only."
- severity = "critical"
- query = query.s3_bucket_restrict_public_read_access
- documentation = file("./pci_v321/docs/pci_v321_s3_2.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_2"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.6,7.2.1"
- })
-}
-
-control "pci_v321_s3_3" {
- title = "3 S3 buckets should have cross-region replication enabled"
- description = "This control checks whether S3 buckets have cross-region replication enabled. PCI DSS does not require data replication or highly available configurations. However, this check aligns with AWS best practices for this control."
- severity = "low"
- query = query.s3_bucket_cross_region_replication_enabled
- documentation = file("./pci_v321/docs/pci_v321_s3_3.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_3"
- pci_requirements = "2.2"
- })
-}
-
-control "pci_v321_s3_4" {
- title = "4 S3 buckets should have server-side encryption enabled"
- description = "This control checks that your Amazon S3 bucket either has Amazon S3 default encryption enabled or that the S3 bucket policy explicitly denies put-object requests without server-side encryption. When you set default encryption on a bucket, all new objects stored in the bucket are encrypted when they are stored, including clear text PAN data. Server-side encryption for all of the objects stored in a bucket can also be enforced using a bucket policy."
- severity = "medium"
- query = query.s3_bucket_default_encryption_enabled
- documentation = file("./pci_v321/docs/pci_v321_s3_4.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_4"
- pci_requirements = "3.4"
- })
-}
-
-control "pci_v321_s3_5" {
- title = "5 S3 buckets should require requests to use Secure Socket Layer"
-  description   = "This control checks whether Amazon S3 buckets have policies that require requests to use Secure Socket Layer (SSL). S3 buckets should have policies that require all requests (Action: S3:*) to only accept transmission of data over HTTPS in the S3 resource policy, indicated by the condition key aws:SecureTransport."
- severity = "medium"
- query = query.s3_bucket_enforces_ssl
- documentation = file("./pci_v321/docs/pci_v321_s3_5.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_5"
-    pci_requirements = "4.1"
-  })
-}
-
-control "pci_v321_s3_6" {
- title = "6 S3 Block Public Access setting should be enabled"
-  description   = "This control checks whether the account-level S3 public access block settings (blockPublicAcls, ignorePublicAcls, blockPublicPolicy, and restrictPublicBuckets) are configured. The control passes if all of the settings are set to true. The control fails if any of the settings are set to false, or if any of the settings are not configured. When the settings do not have a value, the AWS Config rule cannot complete its evaluation."
- severity = "medium"
- query = query.s3_public_access_block_bucket_account
- documentation = file("./pci_v321/docs/pci_v321_s3_6.md")
-
- tags = merge(local.pci_v321_s3_common_tags, {
- pci_item_id = "s3_6"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
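
Of the S3 checks above, the default-encryption control (`pci_v321_s3_4`) is the simplest to illustrate. A hypothetical sketch against the Steampipe `aws_s3_bucket` table, assuming a `server_side_encryption_configuration` column; the real `query.s3_bucket_default_encryption_enabled` would also need to account for bucket policies that deny unencrypted puts:

```sql
-- Hypothetical sketch covering only the default-encryption half of the control.
select
  arn,
  case
    when server_side_encryption_configuration is not null then 'ok'
    else 'alarm'
  end as status
from
  aws_s3_bucket;
```
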
diff --git a/pci_v321/sagemaker.sp b/pci_v321/sagemaker.sp
deleted file mode 100644
index 6177879c..00000000
--- a/pci_v321/sagemaker.sp
+++ /dev/null
@@ -1,30 +0,0 @@
-locals {
- pci_v321_sagemaker_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/SageMaker"
- })
-}
-
-benchmark "pci_v321_sagemaker" {
- title = "SageMaker"
- documentation = file("./pci_v321/docs/pci_v321_sagemaker.md")
- children = [
- control.pci_v321_sagemaker_1,
- ]
-
- tags = merge(local.pci_v321_sagemaker_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_sagemaker_1" {
- title = "1 Amazon SageMaker notebook instances should not have direct internet access"
-  description   = "This control checks whether direct internet access is disabled for a SageMaker notebook instance. To do this, it checks whether the DirectInternetAccess field is disabled for the notebook instance."
- severity = "high"
- query = query.sagemaker_notebook_instance_direct_internet_access_disabled
- documentation = file("./pci_v321/docs/pci_v321_sagemaker_1.md")
-
- tags = merge(local.pci_v321_sagemaker_common_tags, {
- pci_item_id = "sagemaker_1"
- pci_requirements = "1.2.1,1.3.1,1.3.2,1.3.4,1.3.6"
- })
-}
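
A hypothetical sketch of the direct-internet-access check, assuming the Steampipe `aws_sagemaker_notebook_instance` table exposes a `direct_internet_access` column with 'Enabled'/'Disabled' values:

```sql
-- Hypothetical sketch only; the column name and its values are assumptions.
select
  arn,
  case
    when direct_internet_access = 'Disabled' then 'ok'
    else 'alarm'
  end as status
from
  aws_sagemaker_notebook_instance;
```
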
diff --git a/pci_v321/ssm.sp b/pci_v321/ssm.sp
deleted file mode 100644
index 0c72efd1..00000000
--- a/pci_v321/ssm.sp
+++ /dev/null
@@ -1,58 +0,0 @@
-locals {
- pci_v321_ssm_common_tags = merge(local.pci_v321_common_tags, {
- service = "AWS/SSM"
- })
-}
-
-benchmark "pci_v321_ssm" {
- title = "SSM"
- documentation = file("./pci_v321/docs/pci_v321_ssm.md")
- children = [
- control.pci_v321_ssm_1,
- control.pci_v321_ssm_2,
- control.pci_v321_ssm_3
- ]
-
- tags = merge(local.pci_v321_ssm_common_tags, {
- type = "Benchmark"
- })
-}
-
-control "pci_v321_ssm_1" {
- title = "1 Amazon EC2 instances managed by Systems Manager should have a patch compliance status of COMPLIANT after a patch installation"
-  description   = "This control checks whether the compliance status of Amazon EC2 Systems Manager patch compliance is COMPLIANT or NON_COMPLIANT after patch installation on the instance. The control passes if the patch compliance status is COMPLIANT."
- severity = "medium"
- query = query.ssm_managed_instance_compliance_patch_compliant
- documentation = file("./pci_v321/docs/pci_v321_ssm_1.md")
-
- tags = merge(local.pci_v321_ssm_common_tags, {
- pci_item_id = "ssm_1"
- pci_requirements = "6.2"
- })
-}
-
-control "pci_v321_ssm_2" {
- title = "2 Instances managed by Systems Manager should have an association compliance status of COMPLIANT"
- description = "This control checks whether the status of the AWS Systems Manager association compliance is COMPLIANT or NON_COMPLIANT after the association is run on an instance. The control passes if the association compliance status is COMPLIANT."
- severity = "low"
- query = query.ssm_managed_instance_compliance_association_compliant
- documentation = file("./pci_v321/docs/pci_v321_ssm_2.md")
-
- tags = merge(local.pci_v321_ssm_common_tags, {
- pci_item_id = "ssm_2"
- pci_requirements = "2.4"
- })
-}
-
-control "pci_v321_ssm_3" {
- title = "3 EC2 instances should be managed by AWS Systems Manager"
- description = "This control checks whether the EC2 instances in your account are managed by Systems Manager."
- severity = "medium"
- query = query.ec2_instance_ssm_managed
- documentation = file("./pci_v321/docs/pci_v321_ssm_3.md")
-
- tags = merge(local.pci_v321_ssm_common_tags, {
- pci_item_id = "ssm_3"
- pci_requirements = "2.4,6.2"
- })
-}
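
For the `ec2_instance_ssm_managed` check (ssm_3), a hypothetical sketch is to outer-join EC2 instances to the Systems Manager inventory, assuming both Steampipe tables expose an `instance_id` column:

```sql
-- Hypothetical sketch: instances with no matching managed-instance record fail.
select
  i.arn,
  case
    when m.instance_id is not null then 'ok'
    else 'alarm'
  end as status
from
  aws_ec2_instance as i
  left join aws_ssm_managed_instance as m
    on m.instance_id = i.instance_id;
```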