diff --git a/avd_docs/aws/accessanalyzer/AVD-AWS-0175/docs.md b/avd_docs/aws/accessanalyzer/AVD-AWS-0175/docs.md index d5316ab1..5de9dde9 100644 --- a/avd_docs/aws/accessanalyzer/AVD-AWS-0175/docs.md +++ b/avd_docs/aws/accessanalyzer/AVD-AWS-0175/docs.md @@ -1,5 +1,4 @@ - AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer @@ -10,7 +9,7 @@ keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues. ### Impact -Reduced visibility of externally shared resources. + {{ remediationActions }} diff --git a/avd_docs/aws/athena/AVD-AWS-0006/docs.md b/avd_docs/aws/athena/AVD-AWS-0006/docs.md index 50b0475c..b18e1ba5 100644 --- a/avd_docs/aws/athena/AVD-AWS-0006/docs.md +++ b/avd_docs/aws/athena/AVD-AWS-0006/docs.md @@ -1,8 +1,9 @@ -Athena databases and workspace result sets should be encrypted at rests. These databases and query sets are generally derived from data in S3 buckets and should have the same level of at rest protection. +Data can be read if the Athena Database is compromised. Athena databases and workspace result sets should be encrypted at rest. These databases and query sets are generally derived from data in S3 buckets and should have the same level of at rest protection. + ### Impact -Data can be read if the Athena Database is compromised + {{ remediationActions }} diff --git a/avd_docs/aws/athena/AVD-AWS-0007/docs.md b/avd_docs/aws/athena/AVD-AWS-0007/docs.md index 17753ac2..868ece26 100644 --- a/avd_docs/aws/athena/AVD-AWS-0007/docs.md +++ b/avd_docs/aws/athena/AVD-AWS-0007/docs.md @@ -1,8 +1,9 @@ -Athena workgroup configuration should be enforced to prevent client side changes to disable encryption settings. +Clients can ignore encryption requirements without enforced configuration. Athena workgroup configuration should be enforced to prevent client side changes to disable encryption settings. + ### Impact -Clients can ignore encryption requirements + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0014/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0014/docs.md index c7964a24..3371c5d0 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0014/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0014/docs.md @@ -1,8 +1,9 @@ -When creating Cloudtrail in the AWS Management Console the trail is configured by default to be multi-region, this isn't the case with the Terraform resource. Cloudtrail should cover the full AWS account to ensure you can track changes in regions you are not actively operting in. +Activity could be happening in your account in a different region. When creating Cloudtrail in the AWS Management Console the trail is configured by default to be multi-region; this isn't the case with the Terraform resource. Cloudtrail should cover the full AWS account to ensure you can track changes in regions you are not actively operating in.
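+
+A minimal Terraform sketch of a compliant trail (the resource and bucket names are illustrative; `is_multi_region_trail` is the argument documented in the linked provider docs):
+
+```hcl
+resource "aws_cloudtrail" "example" {
+  name                  = "example-trail"
+  s3_bucket_name        = "example-bucket"
+  is_multi_region_trail = true # match the multi-region default of the console
+}
+```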
+ ### Impact -Activity could be happening in your account in a different region + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0015/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0015/docs.md index 88770c40..2575687b 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0015/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0015/docs.md @@ -1,8 +1,9 @@ -Using Customer managed keys provides comprehensive control over cryptographic keys, enabling management of policies, permissions, and rotation, thus enhancing security and compliance measures for sensitive data and systems. +Using AWS managed keys does not allow for fine grained control. Using Customer managed keys provides comprehensive control over cryptographic keys, enabling management of policies, permissions, and rotation, thus enhancing security and compliance measures for sensitive data and systems. + ### Impact -Using AWS managed keys does not allow for fine grained control + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0016/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0016/docs.md index b33a20ae..d07fc2c9 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0016/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0016/docs.md @@ -1,8 +1,9 @@ -Log validation should be activated on Cloudtrail logs to prevent the tampering of the underlying data in the S3 bucket. It is feasible that a rogue actor compromising an AWS account might want to modify the log data to remove trace of their actions. +Illicit activity could be removed from the logs. Log validation should be activated on Cloudtrail logs to prevent the tampering of the underlying data in the S3 bucket. It is feasible that a rogue actor compromising an AWS account might want to modify the log data to remove trace of their actions. + ### Impact -Illicit activity could be removed from the logs + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0161/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0161/docs.md index 6285a1b3..44368079 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0161/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0161/docs.md @@ -1,10 +1,9 @@ - -CloudTrail logs a record of every API call made in your account. These log files are stored in an S3 bucket. CIS recommends that the S3 bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs. Allowing public access to CloudTrail log content might aid an adversary in identifying weaknesses in the affected account's use or configuration. +CloudTrail logs will be publicly exposed, potentially containing sensitive information. CloudTrail logs a record of every API call made in your account. These log files are stored in an S3 bucket. CIS recommends that the S3 bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs. Allowing public access to CloudTrail log content might aid an adversary in identifying weaknesses in the affected account's use or configuration. ### Impact -CloudTrail logs will be publicly exposed, potentially containing sensitive information + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0162/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0162/docs.md index f525622c..4e5907c5 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0162/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0162/docs.md @@ -1,4 +1,5 @@ +Realtime log analysis is not available without enabling CloudWatch logging. 
CloudTrail is a web service that records AWS API calls made in a given account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. @@ -8,7 +9,7 @@ For a trail that is enabled in all Regions in an account, CloudTrail sends log f ### Impact -Realtime log analysis is not available without enabling CloudWatch logging + {{ remediationActions }} diff --git a/avd_docs/aws/cloudtrail/AVD-AWS-0163/docs.md b/avd_docs/aws/cloudtrail/AVD-AWS-0163/docs.md index 78adcba9..f8cb1ac5 100644 --- a/avd_docs/aws/cloudtrail/AVD-AWS-0163/docs.md +++ b/avd_docs/aws/cloudtrail/AVD-AWS-0163/docs.md @@ -1,13 +1,11 @@ Amazon S3 bucket access logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. - CIS recommends that you enable bucket access logging on the CloudTrail S3 bucket. - By enabling S3 bucket logging on target S3 buckets, you can capture all events that might affect objects in a target bucket. Configuring logs to be placed in a separate bucket enables access to log information, which can be useful in security and incident response workflows. ### Impact -There is no way to determine the access to this bucket + {{ remediationActions }} diff --git a/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md b/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md index 61295520..8c9e4a20 100644 --- a/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md +++ b/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md @@ -1,8 +1,9 @@ All artifacts produced by your CodeBuild project pipeline should always be encrypted + ### Impact -CodeBuild project artifacts are unencrypted + {{ remediationActions }} diff --git a/avd_docs/aws/config/AVD-AWS-0019/docs.md b/avd_docs/aws/config/AVD-AWS-0019/docs.md index 4a4ce16a..eb3fa783 100644 --- a/avd_docs/aws/config/AVD-AWS-0019/docs.md +++ b/avd_docs/aws/config/AVD-AWS-0019/docs.md @@ -1,10 +1,10 @@ -The configuration aggregator should be configured with all_regions for the source. - +Sources that aren't covered by the aggregator are not included in the configuration. The configuration aggregator should be configured with all_regions for the source. This will help limit the risk of any unmonitored configuration in regions that are thought to be unused. + ### Impact -Sources that aren't covered by the aggregator are not include in the configuration + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md index f6f10534..4eeac450 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md @@ -1,8 +1,9 @@ Document DB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster you should enable export logs.
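+
+A minimal Terraform sketch (the identifier is illustrative, and the other required cluster arguments are omitted for brevity) showing the `enabled_cloudwatch_logs_exports` argument that turns on audit log export:
+
+```hcl
+resource "aws_docdb_cluster" "example" {
+  cluster_identifier              = "example-cluster"
+  # ...credentials and other required cluster settings elided...
+  enabled_cloudwatch_logs_exports = ["audit"]
+}
+```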
+ ### Impact -Limited visibility of audit trail for changes to the DocumentDB + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md index 28798f39..24f0c5d1 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md @@ -1,8 +1,9 @@ -Encryption of the underlying storage used by DocumentDB ensures that if their is compromise of the disks, the data is still protected. +Unencrypted sensitive data is vulnerable to compromise. Encryption of the underlying storage used by DocumentDB ensures that if there is a compromise of the disks, the data is still protected. + ### Impact -Unencrypted sensitive data is vulnerable to compromise. + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md index c013e4db..a2e329b7 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md @@ -1,8 +1,9 @@ -Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation use customer managed keys. +Using AWS managed keys does not allow for fine grained control. Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation, use customer managed keys. + ### Impact -Using AWS managed keys does not allow for fine grained control + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md index 72d8cbf3..1b2a57ae 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md @@ -1,8 +1,9 @@ -Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. +Data can be freely read if compromised. Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. + ### Impact -Data can be freely read if compromised + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md index 0623a53c..c4251db4 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md @@ -1,10 +1,10 @@ DynamoDB tables should be protected against accidentally or malicious write/delete actions by ensuring that there is adequate protection. - By enabling point-in-time-recovery you can restore to a known point in the event of loss of data. + ### Impact -Accidental or malicious writes and deletes can't be rolled back + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md index d9bde7a8..8397b845 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md @@ -1,8 +1,9 @@ -DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key. +Using AWS managed keys does not allow for fine grained control. DynamoDB tables are encrypted by default using AWS managed encryption keys.
To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key. + ### Impact -Using AWS managed keys does not allow for fine grained control + {{ remediationActions }} diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go index 77f5afdf..4453db45 100755 --- a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go +++ b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go @@ -34,7 +34,8 @@ keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues. Links: []string{ "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { var enabled bool diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego new file mode 100644 index 00000000..eb467998 --- /dev/null +++ b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego @@ -0,0 +1,45 @@ +# METADATA +# title: Enable IAM Access analyzer for IAM policies about all resources in each region. +# description: | +# AWS IAM Access Analyzer helps you identify the resources in your organization and +# accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. +# This lets you identify unintended access to your resources and data. Access Analyzer +# identifies resources that are shared with external principals by using logic-based reasoning +# to analyze the resource-based policies in your AWS environment. IAM Access Analyzer +# continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service) +# keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html +# custom: +# id: AVD-AWS-0175 +# avd_id: AVD-AWS-0175 +# provider: aws +# service: accessanalyzer +# severity: LOW +# short_code: enable-access-analyzer +# recommended_action: Enable IAM Access analyzer across all regions. 
+# frameworks: +# cis-aws-1.4: +# - "1.20" +# input: +# selector: +# - type: cloud +# subtypes: +# - service: accessanalyzer +# provider: aws +package builtin.aws.accessanalyzer.aws0175 + +import rego.v1 + +deny contains res if { + not has_active_analyzer + res := result.new("Access Analyzer is not enabled.", {}) +} + +has_active_analyzer if { + some analyzer in input.aws.accessanalyzer.analyzers + analyzer.active.value +} diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go deleted file mode 100644 index ecfedd49..00000000 --- a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package accessanalyzer - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/accessanalyzer" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestASCheckNoSecretsInUserData(t *testing.T) { - tests := []struct { - name string - input accessanalyzer.AccessAnalyzer - expected bool - }{ - { - name: "No analyzers enabled", - input: accessanalyzer.AccessAnalyzer{}, - expected: true, - }, - { - name: "Analyzer disabled", - input: accessanalyzer.AccessAnalyzer{ - Analyzers: []accessanalyzer.Analyzer{ - { - Metadata: trivyTypes.NewTestMetadata(), - ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()), - Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()), - Active: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "Analyzer enabled", - input: accessanalyzer.AccessAnalyzer{ - Analyzers: []accessanalyzer.Analyzer{ - { - Metadata: trivyTypes.NewTestMetadata(), - ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()), - Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()), - Active: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.AccessAnalyzer = test.input - results := CheckEnableAccessAnalyzer.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAccessAnalyzer.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego new file mode 100644 index 00000000..1e37b2b2 --- /dev/null +++ b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego @@ -0,0 +1,26 @@ +package builtin.aws.accessanalyzer.aws0175_test + +import rego.v1 + +import data.builtin.aws.accessanalyzer.aws0175 as check +import data.lib.test + +test_disallow_no_analyzers if { + r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": []}}} + test.assert_equal_message("Access Analyzer is not enabled.", r) +} + +test_disallow_analyzer_disabled if { + r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": false}}]}}} + test.assert_equal_message("Access Analyzer is 
not enabled.", r) +} + +test_allow_one_of_analyzer_disabled if { + r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": false}}, {"active": {"value": true}}]}}} + test.assert_empty(r) +} + +test_allow_analyzer_enabled if { + r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": true}}]}}} + test.assert_empty(r) +} diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption.go b/checks/cloud/aws/athena/enable_at_rest_encryption.go index 940db308..32d5d367 100755 --- a/checks/cloud/aws/athena/enable_at_rest_encryption.go +++ b/checks/cloud/aws/athena/enable_at_rest_encryption.go @@ -34,7 +34,8 @@ var CheckEnableAtRestEncryption = rules.Register( Links: cloudFormationEnableAtRestEncryptionLinks, RemediationMarkdown: cloudFormationEnableAtRestEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, workgroup := range s.AWS.Athena.Workgroups { diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption.rego b/checks/cloud/aws/athena/enable_at_rest_encryption.rego new file mode 100644 index 00000000..15a90b64 --- /dev/null +++ b/checks/cloud/aws/athena/enable_at_rest_encryption.rego @@ -0,0 +1,53 @@ +# METADATA +# title: Athena databases and workgroup configurations are created unencrypted at rest by default, they should be encrypted +# description: | +# Data can be read if the Athena Database is compromised. Athena databases and workspace result sets should be encrypted at rests. These databases and query sets are generally derived from data in S3 buckets and should have the same level of at rest protection. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/athena/latest/ug/encryption.html +# custom: +# id: AVD-AWS-0006 +# avd_id: AVD-AWS-0006 +# provider: aws +# service: athena +# severity: HIGH +# short_code: enable-at-rest-encryption +# recommended_action: Enable encryption at rest for Athena databases and workgroup configurations +# input: +# selector: +# - type: cloud +# subtypes: +# - service: athena +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_workgroup#encryption_configuration +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_database#encryption_configuration +# good_examples: checks/cloud/aws/athena/enable_at_rest_encryption.tf.go +# bad_examples: checks/cloud/aws/athena/enable_at_rest_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/athena/enable_at_rest_encryption.cf.go +# bad_examples: checks/cloud/aws/athena/enable_at_rest_encryption.cf.go +package builtin.aws.athena.aws0006 + +import rego.v1 + +encryption_type_none := "" + +deny contains res if { + some workgroup in input.aws.athena.workgroups + is_encryption_type_none(workgroup.encryption) + res := result.new("Workgroup does not have encryption configured.", workgroup) +} + +deny contains res if { + some database in input.aws.athena.databases + is_encryption_type_none(database.encryption) + res := result.new("Database does not have encryption configured.", database) +} + +is_encryption_type_none(encryption) if { + encryption.type.value == encryption_type_none +} diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption_test.go b/checks/cloud/aws/athena/enable_at_rest_encryption_test.go deleted file mode 100644 index 
02127836..00000000 --- a/checks/cloud/aws/athena/enable_at_rest_encryption_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package athena - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/athena" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableAtRestEncryption(t *testing.T) { - tests := []struct { - name string - input athena.Athena - expected bool - }{ - { - name: "AWS Athena database unencrypted", - input: athena.Athena{ - Databases: []athena.Database{ - { - Metadata: trivyTypes.NewTestMetadata(), - Encryption: athena.EncryptionConfiguration{ - Metadata: trivyTypes.NewTestMetadata(), - Type: trivyTypes.String(athena.EncryptionTypeNone, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Athena workgroup unencrypted", - input: athena.Athena{ - Workgroups: []athena.Workgroup{ - { - Metadata: trivyTypes.NewTestMetadata(), - Encryption: athena.EncryptionConfiguration{ - Metadata: trivyTypes.NewTestMetadata(), - Type: trivyTypes.String(athena.EncryptionTypeNone, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Athena database and workgroup encrypted", - input: athena.Athena{ - Databases: []athena.Database{ - { - Metadata: trivyTypes.NewTestMetadata(), - Encryption: athena.EncryptionConfiguration{ - Metadata: trivyTypes.NewTestMetadata(), - Type: trivyTypes.String(athena.EncryptionTypeSSEKMS, trivyTypes.NewTestMetadata()), - }, - }, - }, - Workgroups: []athena.Workgroup{ - { - Metadata: trivyTypes.NewTestMetadata(), - Encryption: athena.EncryptionConfiguration{ - Metadata: trivyTypes.NewTestMetadata(), - Type: trivyTypes.String(athena.EncryptionTypeSSEKMS, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.Athena = test.input - results := CheckEnableAtRestEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego b/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego new file mode 100644 index 00000000..4272ac39 --- /dev/null +++ b/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego @@ -0,0 +1,26 @@ +package builtin.aws.athena.aws0006_test + +import rego.v1 + +import data.builtin.aws.athena.aws0006 as check +import data.lib.test + +test_disallow_database_unencrypted if { + inp := {"aws": {"athena": {"databases": [{"encryption": {"type": {"value": ""}}}]}}} + test.assert_equal_message("Database does not have encryption configured.", check.deny) with input as inp +} + +test_disallow_workgroup_unencrypted if { + inp := {"aws": {"athena": {"workgroups": [{"encryption": {"type": {"value": ""}}}]}}} + test.assert_equal_message("Workgroup does not have encryption configured.", check.deny) with input as inp +} + +test_allow_database_encrypted if { + inp := {"aws": {"athena": {"databases": [{"encryption": {"type": {"value": "SSE_S3"}}}]}}} + test.assert_empty(check.deny) with input as 
inp +} + +test_allow_workgroup_encrypted if { + inp := {"aws": {"athena": {"workgroups": [{"encryption": {"type": {"value": "SSE_S3"}}}]}}} + test.assert_empty(check.deny) with input as inp +} diff --git a/checks/cloud/aws/athena/no_encryption_override.go b/checks/cloud/aws/athena/no_encryption_override.go index 54d94d01..ba40c161 100755 --- a/checks/cloud/aws/athena/no_encryption_override.go +++ b/checks/cloud/aws/athena/no_encryption_override.go @@ -33,7 +33,8 @@ var CheckNoEncryptionOverride = rules.Register( Links: cloudFormationNoEncryptionOverrideLinks, RemediationMarkdown: cloudFormationNoEncryptionOverrideRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, workgroup := range s.AWS.Athena.Workgroups { diff --git a/checks/cloud/aws/athena/no_encryption_override.rego b/checks/cloud/aws/athena/no_encryption_override.rego new file mode 100644 index 00000000..c64ab962 --- /dev/null +++ b/checks/cloud/aws/athena/no_encryption_override.rego @@ -0,0 +1,40 @@ +# METADATA +# title: Athena workgroups should enforce configuration to prevent client disabling encryption +# description: | +# Clients can ignore encryption requirements without enforced configuration. Athena workgroup configuration should be enforced to prevent client side changes to disable encryption settings. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/athena/latest/ug/manage-queries-control-costs-with-workgroups.html +# custom: +# id: AVD-AWS-0007 +# avd_id: AVD-AWS-0007 +# provider: aws +# service: athena +# severity: HIGH +# short_code: no-encryption-override +# recommended_action: Enforce the configuration to prevent client overrides +# input: +# selector: +# - type: cloud +# subtypes: +# - service: athena +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_workgroup#configuration +# good_examples: checks/cloud/aws/athena/no_encryption_override.tf.go +# bad_examples: checks/cloud/aws/athena/no_encryption_override.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/athena/no_encryption_override.cf.go +# bad_examples: checks/cloud/aws/athena/no_encryption_override.cf.go +package builtin.aws.athena.aws0007 + +import rego.v1 + +deny contains res if { + some workgroup in input.aws.athena.workgroups + not workgroup.enforceconfiguration.value + res := result.new("The workgroup configuration is not enforced.", workgroup.enforceconfiguration) +} diff --git a/checks/cloud/aws/athena/no_encryption_override_test.go b/checks/cloud/aws/athena/no_encryption_override_test.go deleted file mode 100644 index 55ec5241..00000000 --- a/checks/cloud/aws/athena/no_encryption_override_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package athena - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/athena" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckNoEncryptionOverride(t *testing.T) { - tests := []struct { - name string - input athena.Athena - expected bool - }{ - { - name: "AWS Athena workgroup doesn't enforce configuration", - input: athena.Athena{ - Workgroups: []athena.Workgroup{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnforceConfiguration: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - 
}, - }, - }, - expected: true, - }, - { - name: "AWS Athena workgroup enforces configuration", - input: athena.Athena{ - Workgroups: []athena.Workgroup{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnforceConfiguration: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.Athena = test.input - results := CheckNoEncryptionOverride.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoEncryptionOverride.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/athena/no_encryption_override_test.rego b/checks/cloud/aws/athena/no_encryption_override_test.rego new file mode 100644 index 00000000..55c8140d --- /dev/null +++ b/checks/cloud/aws/athena/no_encryption_override_test.rego @@ -0,0 +1,16 @@ +package builtin.aws.athena.aws0007_test + +import rego.v1 + +import data.builtin.aws.athena.aws0007 as check +import data.lib.test + +test_allow_workgroup_enforce_configuration if { + inp := {"aws": {"athena": {"workgroups": [{"enforceconfiguration": {"value": true}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_workgroup_no_enforce_configuration if { + inp := {"aws": {"athena": {"workgroups": [{"enforceconfiguration": {"value": false}}]}}} + test.assert_equal_message("The workgroup configuration is not enforced.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions.go b/checks/cloud/aws/cloudtrail/enable_all_regions.go index 35cf183b..153ca0cf 100755 --- a/checks/cloud/aws/cloudtrail/enable_all_regions.go +++ b/checks/cloud/aws/cloudtrail/enable_all_regions.go @@ -38,7 +38,8 @@ var CheckEnableAllRegions = rules.Register( Links: cloudFormationEnableAllRegionsLinks, RemediationMarkdown: cloudFormationEnableAllRegionsRemediationMarkdown, }, - Severity: severity.Medium, + Severity: severity.Medium, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions.rego b/checks/cloud/aws/cloudtrail/enable_all_regions.rego new file mode 100644 index 00000000..c11c8080 --- /dev/null +++ b/checks/cloud/aws/cloudtrail/enable_all_regions.rego @@ -0,0 +1,43 @@ +# METADATA +# title: Cloudtrail should be enabled in all regions regardless of where your AWS resources are generally homed +# description: | +# Activity could be happening in your account in a different region. When creating Cloudtrail in the AWS Management Console the trail is configured by default to be multi-region; this isn't the case with the Terraform resource. Cloudtrail should cover the full AWS account to ensure you can track changes in regions you are not actively operating in.
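+#
+# For illustration (names are placeholders), the kind of Terraform the referenced bad_examples file covers is a trail that leaves the documented is_multi_region_trail argument at its default of false:
+#
+#     resource "aws_cloudtrail" "example" {
+#       name           = "example-trail"
+#       s3_bucket_name = "example-bucket"
+#       # is_multi_region_trail defaults to false, so this trail is single-region and fails the check
+#     }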
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html +# custom: +# id: AVD-AWS-0014 +# avd_id: AVD-AWS-0014 +# provider: aws +# service: cloudtrail +# severity: MEDIUM +# short_code: enable-all-regions +# recommended_action: Enable Cloudtrail in all regions +# frameworks: +# cis-aws-1.2: +# - "2.5" +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail +# good_examples: checks/cloud/aws/cloudtrail/enable_all_regions.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/enable_all_regions.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/cloudtrail/enable_all_regions.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/enable_all_regions.cf.go +package builtin.aws.cloudtrail.aws0014 + +import rego.v1 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + not trail.ismultiregion.value + res := result.new("Trail is not enabled across all regions.", trail.ismultiregion) +} diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions_test.go b/checks/cloud/aws/cloudtrail/enable_all_regions_test.go deleted file mode 100644 index 4ca1c625..00000000 --- a/checks/cloud/aws/cloudtrail/enable_all_regions_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package cloudtrail - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableAllRegions(t *testing.T) { - tests := []struct { - name string - input cloudtrail.CloudTrail - expected bool - }{ - { - name: "AWS CloudTrail not enabled across all regions", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - IsMultiRegion: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "AWS CloudTrail enabled across all regions", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - IsMultiRegion: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.input - results := CheckEnableAllRegions.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAllRegions.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego b/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego new file mode 100644 index 00000000..c004db30 --- /dev/null +++ b/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego @@ -0,0 +1,16 @@ +package builtin.aws.cloudtrail.aws0014_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0014 as check +import data.lib.test + +test_disallow_cloudtrail_without_all_regions if { + r := check.deny with input as {"aws": 
{"cloudtrail": {"trails": [{"ismultiregion": {"value": false}}]}}} + test.assert_equal_message("CloudTrail is not enabled across all regions.", r) +} + +test_allow_cloudtrail_with_all_regions if { + r := check.deny with input as {"aws": {"cloudtrail": {"trails": [{"ismultiregion": {"value": true}}]}}} + test.assert_empty(r) +} diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation.go b/checks/cloud/aws/cloudtrail/enable_log_validation.go index 39ae7313..1afa5ecb 100755 --- a/checks/cloud/aws/cloudtrail/enable_log_validation.go +++ b/checks/cloud/aws/cloudtrail/enable_log_validation.go @@ -33,7 +33,8 @@ var CheckEnableLogValidation = rules.Register( Links: cloudFormationEnableLogValidationLinks, RemediationMarkdown: cloudFormationEnableLogValidationRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation.rego b/checks/cloud/aws/cloudtrail/enable_log_validation.rego new file mode 100644 index 00000000..f101c52d --- /dev/null +++ b/checks/cloud/aws/cloudtrail/enable_log_validation.rego @@ -0,0 +1,40 @@ +# METADATA +# title: Cloudtrail log validation should be enabled to prevent tampering of log data +# description: | +# Illicit activity could be removed from the logs. Log validation should be activated on Cloudtrail logs to prevent the tampering of the underlying data in the S3 bucket. It is feasible that a rogue actor compromising an AWS account might want to modify the log data to remove trace of their actions. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html +# custom: +# id: AVD-AWS-0016 +# avd_id: AVD-AWS-0016 +# provider: aws +# service: cloudtrail +# severity: HIGH +# short_code: enable-log-validation +# recommended_action: Turn on log validation for Cloudtrail +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#enable_log_file_validation +# good_examples: checks/cloud/aws/cloudtrail/enable_log_validation.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/enable_log_validation.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/cloudtrail/enable_log_validation.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/enable_log_validation.cf.go +package builtin.aws.cloudtrail.aws0016 + +import rego.v1 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + not trail.enablelogfilevalidation.value + res := result.new("Trail does not have log validation enabled.", trail.enablelogfilevalidation) +} diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation_test.go b/checks/cloud/aws/cloudtrail/enable_log_validation_test.go deleted file mode 100644 index bfe1d465..00000000 --- a/checks/cloud/aws/cloudtrail/enable_log_validation_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package cloudtrail - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableLogValidation(t *testing.T) { - tests := []struct { - name string - input 
cloudtrail.CloudTrail - expected bool - }{ - { - name: "AWS CloudTrail without logfile validation", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnableLogFileValidation: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "AWS CloudTrail with logfile validation enabled", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnableLogFileValidation: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.input - results := CheckEnableLogValidation.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableLogValidation.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego b/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego new file mode 100644 index 00000000..7436046e --- /dev/null +++ b/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego @@ -0,0 +1,16 @@ +package builtin.aws.cloudtrail.aws0016_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0016 as check +import data.lib.test + +test_allow_trail_with_log_validation if { + inp := {"aws": {"cloudtrail": {"trails": [{"enablelogfilevalidation": {"value": true}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_trail_without_log_validation if { + inp := {"aws": {"cloudtrail": {"trails": [{"enablelogfilevalidation": {"value": false}}]}}} + test.assert_equal_message("Trail does not have log validation enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key.go b/checks/cloud/aws/cloudtrail/encryption_customer_key.go index 93f89602..5d39d117 100755 --- a/checks/cloud/aws/cloudtrail/encryption_customer_key.go +++ b/checks/cloud/aws/cloudtrail/encryption_customer_key.go @@ -34,7 +34,8 @@ var EncryptionCustomerManagedKey = rules.Register( Links: cloudFormationEncryptionCustomerManagedKeyLinks, RemediationMarkdown: ``, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key.rego b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego new file mode 100644 index 00000000..16cbc173 --- /dev/null +++ b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego @@ -0,0 +1,43 @@ +# METADATA +# title: CloudTrail should use Customer managed keys to encrypt the logs +# description: | +# Using AWS managed keys does not allow for fine grained control. Using Customer managed keys provides comprehensive control over cryptographic keys, enabling management of policies, permissions, and rotation, thus enhancing security and compliance measures for sensitive data and systems. 
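+#
+# As a hedged sketch (key and trail names are placeholders), pointing the documented kms_key_id argument at a customer managed KMS key satisfies this check:
+#
+#     resource "aws_kms_key" "cloudtrail" {
+#       description         = "CMK for CloudTrail log encryption"
+#       enable_key_rotation = true
+#     }
+#
+#     resource "aws_cloudtrail" "example" {
+#       name           = "example-trail"
+#       s3_bucket_name = "example-bucket"
+#       kms_key_id     = aws_kms_key.cloudtrail.arn
+#     }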
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html +# - https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-mgmt +# custom: +# id: AVD-AWS-0015 +# avd_id: AVD-AWS-0015 +# provider: aws +# service: cloudtrail +# severity: HIGH +# short_code: encryption-customer-managed-key +# recommended_action: Use Customer managed key +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#kms_key_id +# good_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.tf.go +# cloudformation: +# links: +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudtrail-trail.html#cfn-cloudtrail-trail-kmskeyid +# good_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.cf.go +package builtin.aws.cloudtrail.aws0015 + +import rego.v1 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + trail.kmskeyid.value == "" + res := result.new("CloudTrail does not use a customer managed key to encrypt the logs.", trail.kmskeyid) +} diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key_test.go b/checks/cloud/aws/cloudtrail/encryption_customer_key_test.go deleted file mode 100644 index b0d3f61b..00000000 --- a/checks/cloud/aws/cloudtrail/encryption_customer_key_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package cloudtrail - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/aquasecurity/trivy/pkg/iac/scan" - "github.com/aquasecurity/trivy/pkg/iac/state" - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" -) - -func TestEncryptionCustomerManagedKey(t *testing.T) { - tests := []struct { - name string - input cloudtrail.CloudTrail - expected bool - }{ - { - name: "AWS CloudTrail without CMK", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "AWS CloudTrail with CMK", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("some-kms-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.input - results := EncryptionCustomerManagedKey.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == EncryptionCustomerManagedKey.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego b/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego new file mode 100644 index 00000000..3005c9ba --- /dev/null +++ b/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego @@ -0,0 +1,16 @@ +package 
builtin.aws.cloudtrail.aws0015_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0015 as check +import data.lib.test + +test_allow_trail_with_cmk if { + inp := {"aws": {"cloudtrail": {"trails": [{"kmskeyid": {"value": "key-id"}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_trail_without_cmk if { + inp := {"aws": {"cloudtrail": {"trails": [{"kmskeyid": {"value": ""}}]}}} + test.assert_equal_message("CloudTrail does not use a customer managed key to encrypt the logs.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.go b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.go index 4f151796..969baa67 100755 --- a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.go +++ b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.go @@ -9,7 +9,7 @@ import ( "github.com/aquasecurity/trivy/pkg/iac/state" ) -var checkEnsureCloudwatchIntegration = rules.Register( +var CheckEnsureCloudwatchIntegration = rules.Register( scan.Rule{ AVDID: "AVD-AWS-0162", Provider: providers.AWSProvider, @@ -45,7 +45,8 @@ For a trail that is enabled in all Regions in an account, CloudTrail sends log f Links: cloudFormationEnsureCloudwatchIntegrationLinks, RemediationMarkdown: cloudFormationEnsureCloudwatchIntegrationRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego new file mode 100644 index 00000000..ca4964df --- /dev/null +++ b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego @@ -0,0 +1,51 @@ +# METADATA +# title: CloudTrail logs should be stored in S3 and also sent to CloudWatch Logs +# description: | +# Realtime log analysis is not available without enabling CloudWatch logging. +# +# CloudTrail is a web service that records AWS API calls made in a given account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. +# +# CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs in a specified Amazon S3 bucket for long-term analysis, you can perform real-time analysis by configuring CloudTrail to send logs to CloudWatch Logs. +# +# For a trail that is enabled in all Regions in an account, CloudTrail sends log files from all those Regions to a CloudWatch Logs log group. 
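+#
+# A minimal Terraform sketch (the log group and IAM role are assumed to be defined elsewhere; names are placeholders) wiring a trail to CloudWatch Logs via the documented arguments:
+#
+#     resource "aws_cloudtrail" "example" {
+#       name                       = "example-trail"
+#       s3_bucket_name             = "example-bucket"
+#       cloud_watch_logs_group_arn = "${aws_cloudwatch_log_group.trail.arn}:*"
+#       cloud_watch_logs_role_arn  = aws_iam_role.cloudtrail_to_cloudwatch.arn
+#     }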
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html#send-cloudtrail-events-to-cloudwatch-logs-console +# custom: +# id: AVD-AWS-0162 +# avd_id: AVD-AWS-0162 +# provider: aws +# service: cloudtrail +# severity: LOW +# short_code: ensure-cloudwatch-integration +# recommended_action: Enable logging to CloudWatch +# frameworks: +# cis-aws-1.2: +# - "2.4" +# cis-aws-1.4: +# - "3.4" +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail +# good_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.cf.go +package builtin.aws.cloudtrail.aws0162 + +import rego.v1 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + trail.cloudwatchlogsloggrouparn.value == "" + res := result.new("Trail does not have CloudWatch logging configured", trail) +} diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.go b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.go deleted file mode 100644 index 3700afcb..00000000 --- a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package cloudtrail - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/scan" - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/stretchr/testify/assert" -) - -func TestCheckEnsureCloudwatchIntegration(t *testing.T) { - tests := []struct { - name string - input cloudtrail.CloudTrail - expected bool - }{ - { - name: "Trail has cloudwatch configured", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - CloudWatchLogsLogGroupArn: trivyTypes.String("arn:aws:logs:us-east-1:123456789012:log-group:my-log-group", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - { - name: "Trail does not have cloudwatch configured", - input: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - CloudWatchLogsLogGroupArn: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.input - results := checkEnsureCloudwatchIntegration.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == checkEnsureCloudwatchIntegration.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego new file mode 100644 index 00000000..c04d79ed --- /dev/null +++ 
b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego @@ -0,0 +1,16 @@ +package builtin.aws.cloudtrail.aws0162_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0162 as check +import data.lib.test + +test_allow_cloudwatch_integration if { + inp := {"aws": {"cloudtrail": {"trails": [{"cloudwatchlogsloggrouparn": {"value": "log-group-arn"}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_without_cloudwatch_integration if { + inp := {"aws": {"cloudtrail": {"trails": [{"cloudwatchlogsloggrouparn": {"value": ""}}]}}} + test.assert_equal_message("Trail does not have CloudWatch logging configured", check.deny) with input as inp +} diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access.go b/checks/cloud/aws/cloudtrail/no_public_log_access.go index 0b19e1f4..ac6cedd9 100755 --- a/checks/cloud/aws/cloudtrail/no_public_log_access.go +++ b/checks/cloud/aws/cloudtrail/no_public_log_access.go @@ -9,7 +9,7 @@ import ( "github.com/aquasecurity/trivy/pkg/iac/state" ) -var checkNoPublicLogAccess = rules.Register( +var CheckNoPublicLogAccess = rules.Register( scan.Rule{ AVDID: "AVD-AWS-0161", Provider: providers.AWSProvider, @@ -41,7 +41,8 @@ CloudTrail logs a record of every API call made in your account. These log files Links: cloudFormationNoPublicLogAccessLinks, RemediationMarkdown: cloudFormationNoPublicLogAccessRemediationMarkdown, }, - Severity: severity.Critical, + Severity: severity.Critical, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access.rego b/checks/cloud/aws/cloudtrail/no_public_log_access.rego new file mode 100644 index 00000000..57a4352d --- /dev/null +++ b/checks/cloud/aws/cloudtrail/no_public_log_access.rego @@ -0,0 +1,52 @@ +# METADATA +# title: The S3 Bucket backing Cloudtrail should be private +# description: | +# CloudTrail logs will be publicly exposed, potentially containing sensitive information. CloudTrail logs a record of every API call made in your account. These log files are stored in an S3 bucket. CIS recommends that the S3 bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs. Allowing public access to CloudTrail log content might aid an adversary in identifying weaknesses in the affected account's use or configuration.
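+#
+# For illustration (bucket names are placeholders; the ACL values mirror the ones this check inspects), a private log bucket looks like:
+#
+#     resource "aws_s3_bucket" "trail_logs" {
+#       bucket = "example-trail-logs"
+#     }
+#
+#     resource "aws_s3_bucket_acl" "trail_logs" {
+#       bucket = aws_s3_bucket.trail_logs.id
+#       acl    = "private" # a public ACL such as "public-read" is flagged
+#     }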
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/AmazonS3/latest/userguide/configuring-block-public-access-bucket.html +# custom: +# id: AVD-AWS-0161 +# avd_id: AVD-AWS-0161 +# provider: aws +# service: cloudtrail +# severity: CRITICAL +# short_code: no-public-log-access +# recommended_action: Restrict public access to the S3 bucket +# frameworks: +# cis-aws-1.2: +# - "2.3" +# cis-aws-1.4: +# - "3.3" +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail +# good_examples: checks/cloud/aws/cloudtrail/no_public_log_access.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/no_public_log_access.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/cloudtrail/no_public_log_access.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/no_public_log_access.cf.go +package builtin.aws.cloudtrail.aws0161 + +import rego.v1 + +import data.lib.s3 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + trail.bucketname.value != "" + + some bucket in input.aws.s3.buckets + bucket.name.value == trail.bucketname.value + + s3.bucket_has_public_access(bucket) + res := result.new("Trail S3 bucket is publicly exposed", bucket) +} diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access_test.go b/checks/cloud/aws/cloudtrail/no_public_log_access_test.go deleted file mode 100644 index f5db0160..00000000 --- a/checks/cloud/aws/cloudtrail/no_public_log_access_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package cloudtrail - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/s3" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckNoPublicLogAccess(t *testing.T) { - tests := []struct { - name string - inputCT cloudtrail.CloudTrail - inputS3 s3.S3 - expected bool - }{ - { - name: "Trail has bucket with no public access", - inputCT: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - }, - }, - }, - inputS3: s3.S3{ - Buckets: []s3.Bucket{ - { - Metadata: trivyTypes.NewTestMetadata(), - Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - ACL: trivyTypes.String("private", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - { - name: "Trail has bucket with public access", - inputCT: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - }, - }, - }, - inputS3: s3.S3{ - Buckets: []s3.Bucket{ - { - Metadata: trivyTypes.NewTestMetadata(), - Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - ACL: trivyTypes.String("public-read", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.inputCT - testState.AWS.S3 = test.inputS3 - results := checkNoPublicLogAccess.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == 
scan.StatusFailed && result.Rule().LongID() == checkNoPublicLogAccess.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego b/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego new file mode 100644 index 00000000..4e76f6d3 --- /dev/null +++ b/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego @@ -0,0 +1,24 @@ +package builtin.aws.cloudtrail.aws0161_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0161 as check +import data.lib.test + +test_allow_bucket_without_public_access if { + inp := {"aws": { + "cloudtrail": {"trails": [{"bucketname": {"value": "bucket_name"}}]}, + "s3": {"buckets": [{"name": {"value": "bucket_name"}, "acl": {"value": "private"}}]}, + }} + test.assert_empty(check.deny) with input as inp +} + +# TODO: count should be 2 +test_disallow_bucket_with_public_access if { + inp := {"aws": { + "cloudtrail": {"trails": [{"bucketname": {"value": "bucket_name"}}]}, + "s3": {"buckets": [{"name": {"value": "bucket_name"}, "acl": {"value": "public-read"}}]}, + }} + + test.assert_equal_message("Trail S3 bucket is publicly exposed", check.deny) with input as inp +} diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.go b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.go index be4e6b04..e181f7b7 100755 --- a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.go +++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.go @@ -9,7 +9,7 @@ import ( "github.com/aquasecurity/trivy/pkg/iac/state" ) -var checkBucketAccessLoggingRequired = rules.Register( +var CheckBucketAccessLoggingRequired = rules.Register( scan.Rule{ AVDID: "AVD-AWS-0163", Provider: providers.AWSProvider, @@ -44,7 +44,8 @@ By enabling S3 bucket logging on target S3 buckets, you can capture all events t Links: cloudFormationBucketAccessLoggingRequiredLinks, RemediationMarkdown: cloudFormationBucketAccessLoggingRequiredRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, trail := range s.AWS.CloudTrail.Trails { diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego new file mode 100644 index 00000000..512cbef2 --- /dev/null +++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego @@ -0,0 +1,52 @@ +# METADATA +# title: You should enable bucket access logging on the CloudTrail S3 bucket. +# description: | +# Amazon S3 bucket access logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. +# CIS recommends that you enable bucket access logging on the CloudTrail S3 bucket. +# By enabling S3 bucket logging on target S3 buckets, you can capture all events that might affect objects in a target bucket. Configuring logs to be placed in a separate bucket enables access to log information, which can be useful in security and incident response workflows.
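+# As an illustrative sketch only (bucket names are placeholders; the inline logging block assumes the pre-4.x AWS provider syntax), access logging might be enabled on the trail's bucket like so: +# resource "aws_s3_bucket" "cloudtrail" { +# bucket = "my-cloudtrail-logs" +# logging { +# target_bucket = aws_s3_bucket.access_logs.id +# } +# }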
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html +# custom: +# id: AVD-AWS-0163 +# avd_id: AVD-AWS-0163 +# provider: aws +# service: cloudtrail +# severity: LOW +# short_code: require-bucket-access-logging +# recommended_action: Enable access logging on the bucket +# frameworks: +# cis-aws-1.2: +# - "2.6" +# cis-aws-1.4: +# - "3.6" +# input: +# selector: +# - type: cloud +# subtypes: +# - service: cloudtrail +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail +# good_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.tf.go +# bad_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.cf.go +# bad_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.cf.go +package builtin.aws.cloudtrail.aws0163 + +import rego.v1 + +deny contains res if { + some trail in input.aws.cloudtrail.trails + trail.bucketname.value != "" + + some bucket in input.aws.s3.buckets + bucket.name.value == trail.bucketname.value + not bucket.logging.enabled.value + + res := result.new("Trail S3 bucket does not have logging enabled", bucket) +} diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.go b/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.go deleted file mode 100644 index 60b89080..00000000 --- a/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package cloudtrail - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail" - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/s3" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckBucketAccessLoggingRequired(t *testing.T) { - tests := []struct { - name string - inputCT cloudtrail.CloudTrail - inputS3 s3.S3 - expected bool - }{ - { - name: "Trail has bucket with logging enabled", - inputCT: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - }, - }, - }, - inputS3: s3.S3{ - Buckets: []s3.Bucket{ - { - Metadata: trivyTypes.NewTestMetadata(), - Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - Logging: s3.Logging{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - { - name: "Trail has bucket without logging enabled", - inputCT: cloudtrail.CloudTrail{ - Trails: []cloudtrail.Trail{ - { - Metadata: trivyTypes.NewTestMetadata(), - BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - }, - }, - }, - inputS3: s3.S3{ - Buckets: []s3.Bucket{ - { - Metadata: trivyTypes.NewTestMetadata(), - Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), - Logging: s3.Logging{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CloudTrail = test.inputCT - 
testState.AWS.S3 = test.inputS3 - results := checkBucketAccessLoggingRequired.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == checkBucketAccessLoggingRequired.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego b/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego new file mode 100644 index 00000000..5b09f1ef --- /dev/null +++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego @@ -0,0 +1,33 @@ +package builtin.aws.cloudtrail.aws0163_test + +import rego.v1 + +import data.builtin.aws.cloudtrail.aws0163 as check +import data.lib.test + +test_allow_bucket_with_logging_enabled if { + inp := {"aws": { + "cloudtrail": {"trails": [{"bucketname": {"value": "bucket1"}}]}, + "s3": {"buckets": [{ + "name": {"value": "bucket1"}, + "logging": {"enabled": {"value": true}}, + }]}, + }} + + test.assert_empty(check.deny) with input as inp +} + +test_disallow_bucket_with_logging_disabled if { + inp := {"aws": { + "cloudtrail": {"trails": [{"bucketname": {"value": "bucket1"}}]}, + "s3": {"buckets": [{ + "name": {"value": "bucket1"}, + "logging": {"enabled": {"value": false}}, + }]}, + }} + + test.assert_equal_message( + "Trail S3 bucket does not have logging enabled", + check.deny, + ) with input as inp +} diff --git a/checks/cloud/aws/codebuild/enable_encryption.go b/checks/cloud/aws/codebuild/enable_encryption.go index bb0fca17..921ce2d4 100755 --- a/checks/cloud/aws/codebuild/enable_encryption.go +++ b/checks/cloud/aws/codebuild/enable_encryption.go @@ -34,7 +34,8 @@ var CheckEnableEncryption = rules.Register( Links: cloudFormationEnableEncryptionLinks, RemediationMarkdown: cloudFormationEnableEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, project := range s.AWS.CodeBuild.Projects { diff --git a/checks/cloud/aws/codebuild/enable_encryption.rego b/checks/cloud/aws/codebuild/enable_encryption.rego new file mode 100644 index 00000000..fd37bab4 --- /dev/null +++ b/checks/cloud/aws/codebuild/enable_encryption.rego @@ -0,0 +1,49 @@ +# METADATA +# title: CodeBuild Project artifacts encryption should not be disabled +# description: | +# All artifacts produced by your CodeBuild project pipeline should always be encrypted +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-artifacts.html +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html +# custom: +# id: AVD-AWS-0018 +# avd_id: AVD-AWS-0018 +# provider: aws +# service: codebuild +# severity: HIGH +# short_code: enable-encryption +# recommended_action: Enable encryption for CodeBuild project artifacts +# input: +# selector: +# - type: cloud +# subtypes: +# - service: codebuild +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project#encryption_disabled +# good_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go +# bad_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go +# cloudformation: +# good_examples: 
checks/cloud/aws/codebuild/enable_encryption.cf.go +# bad_examples: checks/cloud/aws/codebuild/enable_encryption.cf.go +package builtin.aws.codebuild.aws0018 + +import rego.v1 + +deny contains res if { + some project in input.aws.codebuild.projects + encryptionenabled := project.artifactsettings.encryptionenabled + not encryptionenabled.value + res := result.new("Encryption is not enabled for project artifacts.", encryptionenabled) +} + +deny contains res if { + some project in input.aws.codebuild.projects + some setting in project.secondaryartifactsettings + not setting.encryptionenabled.value + res := result.new("Encryption is not enabled for secondary project artifacts.", setting.encryptionenabled) +} diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.go b/checks/cloud/aws/codebuild/enable_encryption_test.go deleted file mode 100644 index 15493589..00000000 --- a/checks/cloud/aws/codebuild/enable_encryption_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package codebuild - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/codebuild" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableEncryption(t *testing.T) { - tests := []struct { - name string - input codebuild.CodeBuild - expected bool - }{ - { - name: "AWS Codebuild project with unencrypted artifact", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Codebuild project with unencrypted secondary artifact", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - SecondaryArtifactSettings: []codebuild.ArtifactSettings{ - { - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Codebuild with encrypted artifacts", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - SecondaryArtifactSettings: []codebuild.ArtifactSettings{ - { - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CodeBuild = test.input - results := CheckEnableEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.rego 
b/checks/cloud/aws/codebuild/enable_encryption_test.rego new file mode 100644 index 00000000..a5ea58b1 --- /dev/null +++ b/checks/cloud/aws/codebuild/enable_encryption_test.rego @@ -0,0 +1,24 @@ +package builtin.aws.codebuild.aws0018_test + +import rego.v1 + +import data.builtin.aws.codebuild.aws0018 as check +import data.lib.test + +test_allow_artifact_settings_with_encryption if { + test.assert_empty(check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": true}}}) +} + +test_allow_secondary_artifact_settings_with_encryption if { + test.assert_empty(check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": true}}]}) +} + +test_disallow_artifact_settings_without_encryption if { + test.assert_equal_message("Encryption is not enabled for project artifacts.", check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": false}}}) +} + +test_disallow_secondary_artifact_settings_without_encryption if { + test.assert_equal_message("Encryption is not enabled for secondary project artifacts.", check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": false}}]}) +} + +build_input(project) := {"aws": {"codebuild": {"projects": [project]}}} diff --git a/checks/cloud/aws/config/aggregate_all_regions.go b/checks/cloud/aws/config/aggregate_all_regions.go index c534b942..1a9c987e 100755 --- a/checks/cloud/aws/config/aggregate_all_regions.go +++ b/checks/cloud/aws/config/aggregate_all_regions.go @@ -35,7 +35,8 @@ This will help limit the risk of any unmonitored configuration in regions that a Links: cloudFormationAggregateAllRegionsLinks, RemediationMarkdown: cloudFormationAggregateAllRegionsRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { if s.AWS.Config.ConfigurationAggregrator.Metadata.IsUnmanaged() { diff --git a/checks/cloud/aws/config/aggregate_all_regions.rego b/checks/cloud/aws/config/aggregate_all_regions.rego new file mode 100644 index 00000000..5c5ec7c9 --- /dev/null +++ b/checks/cloud/aws/config/aggregate_all_regions.rego @@ -0,0 +1,42 @@ +# METADATA +# title: Config configuration aggregator should be using all regions for source +# description: | +# Sources that aren't covered by the aggregator are not included in the configuration. The configuration aggregator should be configured with all_regions for the source. +# This will help limit the risk of any unmonitored configuration in regions that are thought to be unused.
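+# As an illustrative sketch only (names are placeholders), the all_regions argument from the linked Terraform docs might be set like so: +# resource "aws_config_configuration_aggregator" "example" { +# name = "example" +# account_aggregation_source { +# account_ids = [data.aws_caller_identity.current.account_id] +# all_regions = true +# } +# }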
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/config/latest/developerguide/aggregate-data.html +# custom: +# id: AVD-AWS-0019 +# avd_id: AVD-AWS-0019 +# provider: aws +# service: config +# severity: HIGH +# short_code: aggregate-all-regions +# recommended_action: Set the aggregator to cover all regions +# input: +# selector: +# - type: cloud +# subtypes: +# - service: config +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/config_configuration_aggregator#all_regions +# good_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go +# bad_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go +# bad_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go +package builtin.aws.config.aws0019 + +import rego.v1 + +deny contains res if { + cfg_aggregator := input.aws.config.configurationaggregrator + cfg_aggregator.__defsec_metadata.managed + not cfg_aggregator.sourceallregions.value + res := result.new("Configuration aggregation is not set to source from all regions.", cfg_aggregator.sourceallregions) +} diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.go b/checks/cloud/aws/config/aggregate_all_regions_test.go deleted file mode 100644 index af2b6d0e..00000000 --- a/checks/cloud/aws/config/aggregate_all_regions_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package config - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/config" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckAggregateAllRegions(t *testing.T) { - tests := []struct { - name string - input config.Config - expected bool - }{ - { - name: "AWS Config aggregator source with all regions set to false", - input: config.Config{ - ConfigurationAggregrator: config.ConfigurationAggregrator{ - Metadata: trivyTypes.NewTestMetadata(), - SourceAllRegions: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - expected: true, - }, - { - name: "AWS Config aggregator source with all regions set to true", - input: config.Config{ - ConfigurationAggregrator: config.ConfigurationAggregrator{ - Metadata: trivyTypes.NewTestMetadata(), - SourceAllRegions: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.Config = test.input - results := CheckAggregateAllRegions.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAggregateAllRegions.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.rego b/checks/cloud/aws/config/aggregate_all_regions_test.rego new file mode 100644 index 00000000..854ec618 --- /dev/null +++ b/checks/cloud/aws/config/aggregate_all_regions_test.rego @@ -0,0 +1,20 @@ +package builtin.aws.config.aws0019_test + +import rego.v1 + +import data.builtin.aws.config.aws0019 as check +import data.lib.test + +test_allow_all_regions if { + 
test.assert_empty(check.deny) with input as {"aws": {"config": {"configurationaggregrator": { + "__defsec_metadata": {"managed": true}, + "sourceallregions": {"value": true}, + }}}} +} + +test_disallow_all_regions if { + test.assert_equal_message("Configuration aggregation is not set to source from all regions.", check.deny) with input as {"aws": {"config": {"configurationaggregrator": { + "__defsec_metadata": {"managed": true}, + "sourceallregions": {"value": false}, + }}}} +} diff --git a/checks/cloud/aws/documentdb/enable_log_export.go b/checks/cloud/aws/documentdb/enable_log_export.go index 47d41f38..889d33b9 100755 --- a/checks/cloud/aws/documentdb/enable_log_export.go +++ b/checks/cloud/aws/documentdb/enable_log_export.go @@ -34,7 +34,8 @@ var CheckEnableLogExport = rules.Register( Links: cloudFormationEnableLogExportLinks, RemediationMarkdown: cloudFormationEnableLogExportRemediationMarkdown, }, - Severity: severity.Medium, + Severity: severity.Medium, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git a/checks/cloud/aws/documentdb/enable_log_export.rego b/checks/cloud/aws/documentdb/enable_log_export.rego new file mode 100644 index 00000000..e0cbd385 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_log_export.rego @@ -0,0 +1,49 @@ +# METADATA +# title: DocumentDB logs export should be enabled +# description: | +# Document DB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster you should enable export logs. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html +# custom: +# id: AVD-AWS-0020 +# avd_id: AVD-AWS-0020 +# provider: aws +# service: documentdb +# severity: MEDIUM +# short_code: enable-log-export +# recommended_action: Enable export logs +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#enabled_cloudwatch_logs_exports +# good_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go +# bad_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go +# bad_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go +package builtin.aws.documentdb.aws0020 + +import rego.v1 + +log_export_audit := "audit" + +log_export_profiler := "profiler" + +deny contains res if { + some cluster in input.aws.documentdb.clusters + not export_audit_or_profiler(cluster) + res := result.new("Neither CloudWatch audit nor profiler log exports are enabled.", cluster) +} + +export_audit_or_profiler(cluster) if { + some log in cluster.enabledlogexports + log.value in [log_export_audit, log_export_profiler] +} diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.go b/checks/cloud/aws/documentdb/enable_log_export_test.go deleted file mode 100644 index 9fd21b5a..00000000 --- a/checks/cloud/aws/documentdb/enable_log_export_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - 
-func TestCheckEnableLogExport(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB Cluster not exporting logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "DocDB Cluster exporting audit logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String(documentdb.LogExportAudit, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - { - name: "DocDB Cluster exporting profiler logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String(documentdb.LogExportProfiler, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := CheckEnableLogExport.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableLogExport.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.rego b/checks/cloud/aws/documentdb/enable_log_export_test.rego new file mode 100644 index 00000000..95cb64a1 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_log_export_test.rego @@ -0,0 +1,26 @@ +package builtin.aws.documentdb.aws0020_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0020 as check +import data.lib.test + +test_disallow_no_export_log if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": []}]}}} + test.assert_equal_message("Neither CloudWatch audit nor profiler log exports are enabled.", check.deny) with input as inp +} + +test_allow_export_audit if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}]}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_allow_export_profiler if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "profiler"}]}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_allow_export_mixed if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}, {"value": "profiler"}]}]}}} + test.assert_empty(check.deny) with input as inp +} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.go b/checks/cloud/aws/documentdb/enable_storage_encryption.go index ba8eb653..c34c98d5 100755 --- a/checks/cloud/aws/documentdb/enable_storage_encryption.go +++ b/checks/cloud/aws/documentdb/enable_storage_encryption.go @@ -31,7 +31,8 @@ var CheckEnableStorageEncryption = rules.Register( Links: cloudFormationEnableStorageEncryptionLinks, RemediationMarkdown: cloudFormationEnableStorageEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git 
a/checks/cloud/aws/documentdb/enable_storage_encryption.rego b/checks/cloud/aws/documentdb/enable_storage_encryption.rego new file mode 100644 index 00000000..a7810613 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_storage_encryption.rego @@ -0,0 +1,40 @@ +# METADATA +# title: DocumentDB storage must be encrypted +# description: | +# Unencrypted sensitive data is vulnerable to compromise. Encryption of the underlying storage used by DocumentDB ensures that if there is a compromise of the disks, the data is still protected. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/encryption-at-rest.html +# custom: +# id: AVD-AWS-0021 +# avd_id: AVD-AWS-0021 +# provider: aws +# service: documentdb +# severity: HIGH +# short_code: enable-storage-encryption +# recommended_action: Enable storage encryption +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#storage_encrypted +# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go +# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go +# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go package builtin.aws.documentdb.aws0021 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.documentdb.clusters + not cluster.storageencrypted.value + res := result.new("Cluster storage does not have encryption enabled.", cluster.storageencrypted) +} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go b/checks/cloud/aws/documentdb/enable_storage_encryption_test.go deleted file mode 100644 index 7b289cd7..00000000 --- a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableStorageEncryption(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB unencrypted storage", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - StorageEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "DocDB encrypted storage", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - StorageEncrypted: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := CheckEnableStorageEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableStorageEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found")
- } - }) - } -} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego new file mode 100644 index 00000000..399a2796 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego @@ -0,0 +1,17 @@ +package builtin.aws.documentdb.aws0021_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0021 as check +import data.lib.test + +test_allow_with_encryption if { + inp := {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": true}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_without_encryption if { + inp := {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": false}}]}}} + + test.assert_equal_message("Cluster storage does not have encryption enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.go b/checks/cloud/aws/documentdb/encryption_customer_key.go index 4ba0ebd5..c23f6376 100755 --- a/checks/cloud/aws/documentdb/encryption_customer_key.go +++ b/checks/cloud/aws/documentdb/encryption_customer_key.go @@ -31,7 +31,8 @@ var CheckEncryptionCustomerKey = rules.Register( Links: cloudFormationEncryptionCustomerKeyLinks, RemediationMarkdown: cloudFormationEncryptionCustomerKeyRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.rego b/checks/cloud/aws/documentdb/encryption_customer_key.rego new file mode 100644 index 00000000..4f95d554 --- /dev/null +++ b/checks/cloud/aws/documentdb/encryption_customer_key.rego @@ -0,0 +1,49 @@ +# METADATA +# title: DocumentDB encryption should use Customer Managed Keys +# description: | +# Using AWS managed keys does not allow for fine grained control. Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation, use customer managed keys.
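+# As an illustrative sketch only (the key reference is a placeholder), the kms_key_id argument from the linked Terraform docs might be set like so: +# resource "aws_docdb_cluster" "example" { +# storage_encrypted = true +# kms_key_id = aws_kms_key.docdb.arn +# }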
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.public-key.html +# custom: +# id: AVD-AWS-0022 +# avd_id: AVD-AWS-0022 +# provider: aws +# service: documentdb +# severity: LOW +# short_code: encryption-customer-key +# recommended_action: Enable encryption using customer managed keys +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#kms_key_id +# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go +# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go +# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go +package builtin.aws.documentdb.aws0022 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.documentdb.clusters + cluster.kmskeyid.value == "" + + res := result.new("Cluster encryption does not use a customer-managed KMS key.", cluster) +} + +deny contains res if { + some cluster in input.aws.documentdb.clusters + some instance in cluster.instances + instance.kmskeyid.value == "" + + res := result.new("Instance encryption does not use a customer-managed KMS key.", cluster) +} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.go b/checks/cloud/aws/documentdb/encryption_customer_key_test.go deleted file mode 100644 index 86f1c1f2..00000000 --- a/checks/cloud/aws/documentdb/encryption_customer_key_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEncryptionCustomerKey(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB Cluster encryption missing KMS key", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "DocDB Instance encryption missing KMS key", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - Instances: []documentdb.Instance{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: true, - }, - { - name: "DocDB Cluster and Instance encrypted with proper KMS keys", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - Instances: []documentdb.Instance{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := 
CheckEncryptionCustomerKey.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEncryptionCustomerKey.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.rego b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego new file mode 100644 index 00000000..107a4113 --- /dev/null +++ b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego @@ -0,0 +1,30 @@ +package builtin.aws.documentdb.aws0022_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0022 as check +import data.lib.test + +test_allow_cluster_with_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": "test"}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_allow_instance_with_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": "test"}}]}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_disallow_cluster_without_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": ""}}]}}} + + test.assert_equal_message("Cluster encryption does not use a customer-managed KMS key.", check.deny) with input as inp +} + +test_disallow_instance_without_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": ""}}]}]}}} + + test.assert_equal_message("Instance encryption does not use a customer-managed KMS key.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go index b0cac3be..444b0d47 100755 --- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go +++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go @@ -34,7 +34,8 @@ var CheckEnableAtRestEncryption = rules.Register( Links: cloudFormationEnableAtRestEncryptionLinks, RemediationMarkdown: cloudFormationEnableAtRestEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DynamoDB.DAXClusters { diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego new file mode 100644 index 00000000..2ce4ea7e --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego @@ -0,0 +1,41 @@ +# METADATA +# title: DAX Cluster should always encrypt data at rest +# description: | +# Data can be freely read if compromised. Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. 
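+# As an illustrative sketch only (other required cluster arguments are omitted), the server_side_encryption block from the linked Terraform docs might be set like so: +# resource "aws_dax_cluster" "example" { +# server_side_encryption { +# enabled = true +# } +# }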
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAXEncryptionAtRest.html +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html +# custom: +# id: AVD-AWS-0023 +# avd_id: AVD-AWS-0023 +# provider: aws +# service: dynamodb +# severity: HIGH +# short_code: enable-at-rest-encryption +# recommended_action: Enable encryption at rest for DAX Cluster +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dax_cluster#server_side_encryption +# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go +package builtin.aws.dynamodb.aws0023 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.dynamodb.daxclusters + cluster.serversideencryption.enabled.value == false + res := result.new("DAX encryption is not enabled.", cluster.serversideencryption.enabled) +} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go deleted file mode 100644 index 66c02a1b..00000000 --- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package dynamodb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableAtRestEncryption(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster with SSE disabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster with SSE enabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckEnableAtRestEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego new file mode 100644 index 00000000..237e2f20 --- /dev/null +++ 
b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego @@ -0,0 +1,18 @@ +package builtin.aws.dynamodb.aws0023_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0023 as check +import data.lib.test + +test_allow_with_encryption if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": true}}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_disallow_without_encryption if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": false}}}]}}} + + test.assert_equal_message("DAX encryption is not enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/enable_recovery.go b/checks/cloud/aws/dynamodb/enable_recovery.go index 8fa5e687..0cf2b6dc 100755 --- a/checks/cloud/aws/dynamodb/enable_recovery.go +++ b/checks/cloud/aws/dynamodb/enable_recovery.go @@ -29,7 +29,8 @@ By enabling point-in-time-recovery you can restore to a known point in the event Links: terraformEnableRecoveryLinks, RemediationMarkdown: terraformEnableRecoveryRemediationMarkdown, }, - Severity: severity.Medium, + Severity: severity.Medium, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DynamoDB.DAXClusters { diff --git a/checks/cloud/aws/dynamodb/enable_recovery.rego b/checks/cloud/aws/dynamodb/enable_recovery.rego new file mode 100644 index 00000000..3e19dca2 --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_recovery.rego @@ -0,0 +1,46 @@ +# METADATA +# title: Point in time recovery should be enabled to protect DynamoDB table +# description: | +# DynamoDB tables should be protected against accidental or malicious write/delete actions by ensuring that there is adequate protection. +# By enabling point-in-time-recovery you can restore to a known point in the event of loss of data.
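+# As an illustrative sketch only (the table definition is a placeholder), the point_in_time_recovery block from the linked Terraform docs might be set like so: +# resource "aws_dynamodb_table" "example" { +# point_in_time_recovery { +# enabled = true +# } +# }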
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.html +# custom: +# id: AVD-AWS-0024 +# avd_id: AVD-AWS-0024 +# provider: aws +# service: dynamodb +# severity: MEDIUM +# short_code: enable-recovery +# recommended_action: Enable point in time recovery +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#point_in_time_recovery +# good_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go package builtin.aws.dynamodb.aws0024 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.dynamodb.daxclusters + cluster.pointintimerecovery.value == false + + res := result.new("Point-in-time recovery is not enabled.", cluster.pointintimerecovery) +} + +deny contains res if { + some table in input.aws.dynamodb.tables + table.pointintimerecovery.value == false + + res := result.new("Point-in-time recovery is not enabled.", table.pointintimerecovery) +} diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.go b/checks/cloud/aws/dynamodb/enable_recovery_test.go deleted file mode 100644 index 9df6d104..00000000 --- a/checks/cloud/aws/dynamodb/enable_recovery_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dynamodb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableRecovery(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster with point in time recovery disabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - PointInTimeRecovery: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "Cluster with point in time recovery enabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - PointInTimeRecovery: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckEnableRecovery.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableRecovery.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.rego b/checks/cloud/aws/dynamodb/enable_recovery_test.rego new file mode 100644 index 00000000..e73a8d09 --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_recovery_test.rego @@ -0,0 +1,30 @@ +package builtin.aws.dynamodb.aws0024_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0024 as check +import data.lib.test + +test_allow_cluster_with_recovery if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"pointintimerecovery": {"value": true}}]}}} + +
test.assert_empty(check.deny) with input as inp +} + +test_deny_cluster_without_recovery if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"pointintimerecovery": {"value": false}}]}}} + + test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp +} + +test_allow_table_with_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": true}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_deny_table_without_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": false}}]}}} + + test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/table_customer_key.go b/checks/cloud/aws/dynamodb/table_customer_key.go index 643e3bdd..e0dbca69 100755 --- a/checks/cloud/aws/dynamodb/table_customer_key.go +++ b/checks/cloud/aws/dynamodb/table_customer_key.go @@ -28,7 +28,8 @@ var CheckTableCustomerKey = rules.Register( Links: terraformTableCustomerKeyLinks, RemediationMarkdown: terraformTableCustomerKeyRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, table := range s.AWS.DynamoDB.Tables { diff --git a/checks/cloud/aws/dynamodb/table_customer_key.rego b/checks/cloud/aws/dynamodb/table_customer_key.rego new file mode 100644 index 00000000..a762aebc --- /dev/null +++ b/checks/cloud/aws/dynamodb/table_customer_key.rego @@ -0,0 +1,49 @@ +# METADATA +# title: DynamoDB tables should use at rest encryption with a Customer Managed Key +# description: | +# Using AWS managed keys does not allow for fine grained control. DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and manage factors like key rotation, use a Customer Managed Key.
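+# As an illustrative sketch only (the key reference is a placeholder; kms_key_arn is the argument under the linked server_side_encryption block), a customer managed key might be configured like so: +# resource "aws_dynamodb_table" "example" { +# server_side_encryption { +# enabled = true +# kms_key_arn = aws_kms_key.table.arn +# } +# }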
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EncryptionAtRest.html +# custom: +# id: AVD-AWS-0025 +# avd_id: AVD-AWS-0025 +# provider: aws +# service: dynamodb +# severity: LOW +# short_code: table-customer-key +# recommended_action: Enable server side encryption with a customer managed key +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#server_side_encryption +# good_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go +# bad_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go +package builtin.aws.dynamodb.aws0025 + +import rego.v1 + +deny contains res if { + some table in input.aws.dynamodb.tables + table.serversideencryption.enabled.value == false + res := result.new("Table encryption does not use a customer-managed KMS key.", table.serversideencryption.enabled) +} + +deny contains res if { + some table in input.aws.dynamodb.tables + table.serversideencryption.enabled.value + not valid_key(table.serversideencryption.kmskeyid.value) + res := result.new("Table encryption explicitly uses the default KMS key.", table.serversideencryption.kmskeyid) +} + +valid_key(k) if { + k != "" + k != "alias/aws/dynamodb" +} diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.go b/checks/cloud/aws/dynamodb/table_customer_key_test.go deleted file mode 100644 index 56daa731..00000000 --- a/checks/cloud/aws/dynamodb/table_customer_key_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package dynamodb - -import ( - "testing" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - "github.com/aquasecurity/trivy/pkg/iac/state" - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - "github.com/stretchr/testify/assert" -) - -func TestCheckTableCustomerKey(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster encryption missing KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster encryption using default KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String(dynamodb.DefaultKMSKeyID, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster encryption using proper KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - { - name: "KMS key exist, but SSE is not enabled", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: 
trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.BoolDefault(false, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckTableCustomerKey.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckTableCustomerKey.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.rego b/checks/cloud/aws/dynamodb/table_customer_key_test.rego new file mode 100644 index 00000000..74b24f7c --- /dev/null +++ b/checks/cloud/aws/dynamodb/table_customer_key_test.rego @@ -0,0 +1,42 @@ +package builtin.aws.dynamodb.aws0025_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0025 as check +import data.lib.test + +test_allow_table_with_cmk if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": true}, + "kmskeyid": {"value": "alias/test"}, + }, + }]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_deny_table_without_cmk if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": true}, + "kmskeyid": {"value": ""}, + }, + }]}}} + + test.assert_equal_message("Table encryption explicitly uses the default KMS key.", check.deny) with input as inp +} + +test_deny_table_sse_disabled if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": false}, + "kmskeyid": {"value": ""}, + }, + }]}}} + + test.assert_equal_message("Table encryption does not use a customer-managed KMS key.", check.deny) with input as inp +} diff --git a/cmd/command_id/main.go b/cmd/command_id/main.go index 395d2f34..802614e2 100644 --- a/cmd/command_id/main.go +++ b/cmd/command_id/main.go @@ -7,7 +7,7 @@ import ( "strings" trivy_checks "github.com/aquasecurity/trivy-checks" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) const ( diff --git a/go.mod b/go.mod index e40360ff..400a744e 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,9 @@ require ( github.com/liamg/memoryfs v1.6.0 github.com/open-policy-agent/opa v0.67.0 github.com/owenrumney/squealer v1.2.2 + github.com/samber/lo v1.39.0 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.31.0 - gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 mvdan.cc/sh/v3 v3.8.0 ) @@ -133,7 +133,6 @@ require ( github.com/rivo/uniseg v0.2.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/samber/lo v1.39.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shirou/gopsutil/v3 v3.24.2 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect @@ -188,6 +187,7 @@ require ( google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect oras.land/oras-go/v2 v2.3.1 // indirect
diff --git a/cmd/command_id/main.go b/cmd/command_id/main.go
index 395d2f34..802614e2 100644
--- a/cmd/command_id/main.go
+++ b/cmd/command_id/main.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 
 	trivy_checks "github.com/aquasecurity/trivy-checks"
-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
 )
 
 const (
diff --git a/go.mod b/go.mod
index e40360ff..400a744e 100644
--- a/go.mod
+++ b/go.mod
@@ -11,9 +11,9 @@ require (
 	github.com/liamg/memoryfs v1.6.0
 	github.com/open-policy-agent/opa v0.67.0
 	github.com/owenrumney/squealer v1.2.2
+	github.com/samber/lo v1.39.0
 	github.com/stretchr/testify v1.9.0
 	github.com/testcontainers/testcontainers-go v0.31.0
-	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	mvdan.cc/sh/v3 v3.8.0
 )
@@ -133,7 +133,6 @@ require (
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
-	github.com/samber/lo v1.39.0 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 	github.com/shirou/gopsutil/v3 v3.24.2 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
@@ -188,6 +187,7 @@ require (
 	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 	k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect
 	oras.land/oras-go/v2 v2.3.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/lib/cloud/s3.rego b/lib/cloud/s3.rego
new file mode 100644
index 00000000..20497051
--- /dev/null
+++ b/lib/cloud/s3.rego
@@ -0,0 +1,11 @@
+package lib.s3
+
+import rego.v1
+
+public_acls := {"public-read", "public-read-write", "website", "authenticated-read"}
+
+bucket_has_public_access(bucket) if {
+	bucket.acl.value in public_acls
+	not bucket.publicaccessblock.ignorepublicacls.value
+	not bucket.publicaccessblock.blockpublicacls.value
+}
diff --git a/lib/test/test.rego b/lib/test/test.rego
new file mode 100644
index 00000000..8e9ceb59
--- /dev/null
+++ b/lib/test/test.rego
@@ -0,0 +1,37 @@
+package lib.test
+
+import rego.v1
+
+assert_empty(v) if {
+	not _assert_not_empty(v)
+}
+
+_assert_not_empty(v) if {
+	count(v) > 0
+	trace_and_print(sprintf("assert_not_empty:\n %v", [v]))
+}
+
+assert_equal_message(expected, results) if {
+	assert_count(results, 1)
+	not _assert_not_equal_message(results, expected)
+}
+
+_assert_not_equal_message(results, expected) if {
+	msg := [res.msg | some res in results][0]
+	msg != expected
+	trace_and_print(sprintf("assert_equal_message:\n Got %q\n Expected %q", [msg, expected]))
+}
+
+assert_count(results, expected) if {
+	not _assert_not_count(results, expected)
+}
+
+_assert_not_count(results, expected) if {
+	count(results) != expected
+	trace_and_print(sprintf("assert_count:\n Got %v\n Expected %v", [count(results), expected]))
+}
+
+trace_and_print(v) if {
+	trace(v)
+	print(v)
+}
diff --git a/scripts/bundle.sh b/scripts/bundle.sh
index f6effa1a..57845840 100755
--- a/scripts/bundle.sh
+++ b/scripts/bundle.sh
@@ -17,7 +17,7 @@ for dir in kubernetes cloud docker; do
 done
 
-for dir in kubernetes docker; do
+for dir in kubernetes docker cloud test; do
 	mkdir -p bundle/policies/$dir/lib
 	rsync -avr --exclude="*_test.rego" --exclude="*.go" lib/$dir/* bundle/policies/$dir/lib
 done
diff --git a/test/rego_checks_test.go b/test/rego_checks_test.go
new file mode 100644
index 00000000..e4970655
--- /dev/null
+++ b/test/rego_checks_test.go
@@ -0,0 +1,744 @@
+package test
+
+import (
+	"context"
+	"testing"
+
+	checks "github.com/aquasecurity/trivy-checks"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/accessanalyzer"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/athena"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/cloudtrail"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/codebuild"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/config"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb"
+	"github.com/aquasecurity/trivy/pkg/iac/providers/aws/s3"
+	"github.com/aquasecurity/trivy/pkg/iac/rego"
+	"github.com/aquasecurity/trivy/pkg/iac/rules"
+	"github.com/aquasecurity/trivy/pkg/iac/scan"
+	"github.com/aquasecurity/trivy/pkg/iac/state"
+	trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
+	ruleTypes "github.com/aquasecurity/trivy/pkg/iac/types/rules"
+	"github.com/samber/lo"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func scanState(t *testing.T, regoScanner *rego.Scanner, s state.State, checkID string, expected bool) {
+	results, err := regoScanner.ScanInput(context.TODO(), rego.Input{
+		Contents: s.ToRego(),
+	})
+	require.NoError(t, err)
+
+	var found bool
+	for _, result := range results {
+		if result.Status() == scan.StatusFailed && result.Rule().AVDID == checkID {
+			found = true
+		}
+	}
+
+	if
expected { + assert.True(t, found, "Rule should have been found") + } else { + assert.False(t, found, "Rule should not have been found") + } +} + +func TestAWSRegoChecks(t *testing.T) { + type testCase struct { + name string + input state.State + expected bool + } + + tests := map[string][]testCase{ + "AVD-AWS-0175": { + // TODO: Trivy does not export empty structures into Rego + // { + + // name: "No analyzers enabled", + // input: state.State{AWS: aws.AWS{AccessAnalyzer: accessanalyzer.AccessAnalyzer{}}}, + // expected: true, + // }, + { + name: "Analyzer disabled", + input: state.State{AWS: aws.AWS{AccessAnalyzer: accessanalyzer.AccessAnalyzer{ + Analyzers: []accessanalyzer.Analyzer{ + { + Metadata: trivyTypes.NewTestMetadata(), + ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()), + Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()), + Active: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "Analyzer enabled", + input: state.State{AWS: aws.AWS{AccessAnalyzer: accessanalyzer.AccessAnalyzer{ + Analyzers: []accessanalyzer.Analyzer{ + { + Metadata: trivyTypes.NewTestMetadata(), + ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()), + Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()), + Active: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }}}, + }, + expected: false, + }, + }, + "AVD-AWS-0006": { + { + name: "AWS Athena database unencrypted", + input: state.State{AWS: aws.AWS{Athena: athena.Athena{ + Databases: []athena.Database{ + { + Metadata: trivyTypes.NewTestMetadata(), + Encryption: athena.EncryptionConfiguration{ + Metadata: trivyTypes.NewTestMetadata(), + Type: trivyTypes.String(athena.EncryptionTypeNone, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + }, + expected: true, + }, + { + name: "AWS Athena workgroup unencrypted", + input: state.State{AWS: aws.AWS{Athena: athena.Athena{ + Workgroups: []athena.Workgroup{ + { + Metadata: trivyTypes.NewTestMetadata(), + Encryption: athena.EncryptionConfiguration{ + Metadata: trivyTypes.NewTestMetadata(), + Type: trivyTypes.String(athena.EncryptionTypeNone, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + }, + expected: true, + }, + { + name: "AWS Athena database and workgroup encrypted", + input: state.State{AWS: aws.AWS{Athena: athena.Athena{ + Databases: []athena.Database{ + { + Metadata: trivyTypes.NewTestMetadata(), + Encryption: athena.EncryptionConfiguration{ + Metadata: trivyTypes.NewTestMetadata(), + Type: trivyTypes.String(athena.EncryptionTypeSSEKMS, trivyTypes.NewTestMetadata()), + }, + }, + }, + Workgroups: []athena.Workgroup{ + { + Metadata: trivyTypes.NewTestMetadata(), + Encryption: athena.EncryptionConfiguration{ + Metadata: trivyTypes.NewTestMetadata(), + Type: trivyTypes.String(athena.EncryptionTypeSSEKMS, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + }, + expected: false, + }, + }, + "AVD-AWS-0007": { + { + name: "AWS Athena workgroup doesn't enforce configuration", + input: state.State{AWS: aws.AWS{Athena: athena.Athena{ + Workgroups: []athena.Workgroup{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnforceConfiguration: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "AWS Athena workgroup enforces configuration", + input: state.State{AWS: aws.AWS{Athena: athena.Athena{ + Workgroups: []athena.Workgroup{ + { + Metadata: 
trivyTypes.NewTestMetadata(), + EnforceConfiguration: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0014": { + { + name: "AWS CloudTrail not enabled across all regions", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + IsMultiRegion: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "AWS CloudTrail enabled across all regions", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + IsMultiRegion: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0016": { + { + name: "AWS CloudTrail without logfile validation", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnableLogFileValidation: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "AWS CloudTrail with logfile validation enabled", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnableLogFileValidation: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0015": { + { + name: "AWS CloudTrail without CMK", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "AWS CloudTrail with CMK", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("some-kms-key", trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0162": { + { + name: "Trail has cloudwatch configured", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + CloudWatchLogsLogGroupArn: trivyTypes.String("arn:aws:logs:us-east-1:123456789012:log-group:my-log-group", trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + { + name: "Trail does not have cloudwatch configured", + input: state.State{AWS: aws.AWS{CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + CloudWatchLogsLogGroupArn: trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + }, + "AVD-AWS-0161": { + { + name: "Trail has bucket with no public access", + input: state.State{AWS: aws.AWS{ + CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + }, + }, + }, + S3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: trivyTypes.NewTestMetadata(), + Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + ACL: trivyTypes.String("private", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}, + expected: false, + }, + { + name: "Trail has bucket with public access", + input: state.State{AWS: aws.AWS{ + CloudTrail: cloudtrail.CloudTrail{ + Trails: 
[]cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + }, + }, + }, + S3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: trivyTypes.NewTestMetadata(), + Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + ACL: trivyTypes.String("public-read", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}, + expected: true, + }, + }, + "AVD-AWS-0163": { + { + name: "Trail has bucket with logging enabled", + input: state.State{AWS: aws.AWS{ + CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + }, + }, + }, + S3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: trivyTypes.NewTestMetadata(), + Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + Logging: s3.Logging{ + Metadata: trivyTypes.NewTestMetadata(), + Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }, + }, + }}, + expected: false, + }, + { + name: "Trail has bucket without logging enabled", + input: state.State{AWS: aws.AWS{ + CloudTrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: trivyTypes.NewTestMetadata(), + BucketName: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + }, + }, + }, + S3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: trivyTypes.NewTestMetadata(), + Name: trivyTypes.String("my-bucket", trivyTypes.NewTestMetadata()), + Logging: s3.Logging{ + Metadata: trivyTypes.NewTestMetadata(), + Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }, + }, + }}, + expected: true, + }, + }, + "AVD-AWS-0018": { + { + name: "AWS Codebuild project with unencrypted artifact", + input: state.State{AWS: aws.AWS{CodeBuild: codebuild.CodeBuild{ + Projects: []codebuild.Project{ + { + Metadata: trivyTypes.NewTestMetadata(), + ArtifactSettings: codebuild.ArtifactSettings{ + Metadata: trivyTypes.NewTestMetadata(), + EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + { + name: "AWS Codebuild project with unencrypted secondary artifact", + input: state.State{AWS: aws.AWS{CodeBuild: codebuild.CodeBuild{ + Projects: []codebuild.Project{ + { + Metadata: trivyTypes.NewTestMetadata(), + ArtifactSettings: codebuild.ArtifactSettings{ + Metadata: trivyTypes.NewTestMetadata(), + EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + SecondaryArtifactSettings: []codebuild.ArtifactSettings{ + { + Metadata: trivyTypes.NewTestMetadata(), + EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }, + }, + }}}, + expected: true, + }, + { + name: "AWS Codebuild with encrypted artifacts", + input: state.State{AWS: aws.AWS{CodeBuild: codebuild.CodeBuild{ + Projects: []codebuild.Project{ + { + Metadata: trivyTypes.NewTestMetadata(), + ArtifactSettings: codebuild.ArtifactSettings{ + Metadata: trivyTypes.NewTestMetadata(), + EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + SecondaryArtifactSettings: []codebuild.ArtifactSettings{ + { + Metadata: trivyTypes.NewTestMetadata(), + EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0019": { + { + name: "AWS Config aggregator source with all regions set to false", + input: state.State{AWS: aws.AWS{Config: config.Config{ + ConfigurationAggregrator: 
config.ConfigurationAggregrator{ + Metadata: trivyTypes.NewTestMetadata(), + SourceAllRegions: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}, + expected: true, + }, + { + name: "AWS Config aggregator source with all regions set to true", + input: state.State{AWS: aws.AWS{Config: config.Config{ + ConfigurationAggregrator: config.ConfigurationAggregrator{ + Metadata: trivyTypes.NewTestMetadata(), + SourceAllRegions: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0020": { + { + name: "DocDB Cluster not exporting logs", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnabledLogExports: []trivyTypes.StringValue{ + trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + { + name: "DocDB Cluster exporting audit logs", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnabledLogExports: []trivyTypes.StringValue{ + trivyTypes.String(documentdb.LogExportAudit, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: false, + }, + { + name: "DocDB Cluster exporting profiler logs", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + EnabledLogExports: []trivyTypes.StringValue{ + trivyTypes.String(documentdb.LogExportProfiler, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0021": { + { + name: "DocDB unencrypted storage", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + StorageEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "DocDB encrypted storage", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + StorageEncrypted: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0022": { + { + name: "DocDB Cluster encryption missing KMS key", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "DocDB Instance encryption missing KMS key", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), + Instances: []documentdb.Instance{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }, + }, + }}}, + expected: true, + }, + { + name: "DocDB Cluster and Instance encrypted with proper KMS keys", + input: state.State{AWS: aws.AWS{DocumentDB: documentdb.DocumentDB{ + Clusters: []documentdb.Cluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), + Instances: []documentdb.Instance{ + { + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), + }, + }, + }, + }, 
+ }}}, + expected: false, + }, + }, + "AVD-AWS-0023": { + { + name: "Cluster with SSE disabled", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + DAXClusters: []dynamodb.DAXCluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: trivyTypes.NewTestMetadata(), + Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + { + name: "Cluster with SSE enabled", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + DAXClusters: []dynamodb.DAXCluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: trivyTypes.NewTestMetadata(), + Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0024": { + { + name: "Cluster with point in time recovery disabled", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + DAXClusters: []dynamodb.DAXCluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + PointInTimeRecovery: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: true, + }, + { + name: "Cluster with point in time recovery enabled", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + DAXClusters: []dynamodb.DAXCluster{ + { + Metadata: trivyTypes.NewTestMetadata(), + PointInTimeRecovery: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + }, + }, + }}}, + expected: false, + }, + }, + "AVD-AWS-0025": { + { + name: "Cluster encryption missing KMS key", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + Tables: []dynamodb.Table{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + { + name: "Cluster encryption using default KMS key", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + Tables: []dynamodb.Table{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String(dynamodb.DefaultKMSKeyID, trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + { + name: "Cluster encryption using proper KMS key", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + Tables: []dynamodb.Table{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: false, + }, + { + name: "KMS key exist, but SSE is not enabled", + input: state.State{AWS: aws.AWS{DynamoDB: dynamodb.DynamoDB{ + Tables: []dynamodb.Table{ + { + Metadata: trivyTypes.NewTestMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Enabled: trivyTypes.BoolDefault(false, trivyTypes.NewTestMetadata()), + Metadata: trivyTypes.NewTestMetadata(), + KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), + }, + }, + }, + }}}, + expected: true, + }, + }, + } + + regoScanner := rego.NewScanner(trivyTypes.SourceCloud) + err := regoScanner.LoadPolicies(true, 
false, checks.EmbeddedPolicyFileSystem, []string{"."}, nil) + require.NoError(t, err) + + missedIDs, _ := lo.Difference(getMigratedChecksIDs(), lo.Keys(tests)) + assert.Emptyf(t, missedIDs, "Checks %v not covered", missedIDs) + + for id, cases := range tests { + t.Run(id, func(t *testing.T) { + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + scanState(t, regoScanner, tc.input, id, tc.expected) + }) + } + }) + } +} + +func getMigratedChecksIDs() []string { + allChecks := rules.GetRegistered() + + goChecksIDs := lo.FilterMap(allChecks, func(r ruleTypes.RegisteredRule, _ int) (string, bool) { + return r.AVDID, r.Check != nil + }) + + regoChecksMap := lo.SliceToMap(lo.Filter(allChecks, func(r ruleTypes.RegisteredRule, _ int) bool { + return r.Check == nil + }), func(r ruleTypes.RegisteredRule) (string, any) { + return r.AVDID, struct{}{} + }) + + return lo.Filter(goChecksIDs, func(avdID string, _ int) bool { + _, exists := regoChecksMap[avdID] + return exists + }) +}
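A note on the new lib/cloud/s3.rego shared library earlier in this patch: bucket_has_public_access is intended to be reused by bucket-related checks such as the AVD-AWS-0161 public-access rule exercised above. A minimal sketch of a consuming check (hypothetical package name; result.new is the builtin provided by trivy's Rego runtime, as used by the other checks in this patch):

package builtin.aws.example.sketch

import rego.v1

import data.lib.s3

# Sketch only: flag any bucket whose ACL is public and is not
# overridden by a public-access-block configuration.
deny contains res if {
	some bucket in input.aws.s3.buckets
	s3.bucket_has_public_access(bucket)
	res := result.new("Bucket has a public ACL.", bucket.acl)
}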
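Similarly, the lib/test helpers make assert_equal_message insist on exactly one result (via assert_count(results, 1)); when a single input is expected to trip several deny rules at once, assert_count is the assertion to reach for. A self-contained sketch (hypothetical package and rules, for illustration only):

package example.sketch_test

import rego.v1

import data.lib.test

# Two toy rules that both fire on the same input.
deny contains "first finding" if input.a

deny contains "second finding" if input.a

test_two_findings if {
	test.assert_count(deny, 2) with input as {"a": true}
}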