diff --git a/avd_docs/aws/iam/AVD-AWS-0165/docs.md b/avd_docs/aws/iam/AVD-AWS-0165/docs.md
new file mode 100644
index 000000000..d8c6497ee
--- /dev/null
+++ b/avd_docs/aws/iam/AVD-AWS-0165/docs.md
@@ -0,0 +1,15 @@
+
+
+Hardware MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they're prompted for their user name and password and for an authentication code from their AWS MFA device.
+
+
+### Impact
+Compromise of the root account compromises the entire AWS account and all resources within it.
+
+
+{{ remediationActions }}
+
+### Links
+- https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html
+
+
diff --git a/avd_docs/aws/iam/AVD-AWS-0166/docs.md b/avd_docs/aws/iam/AVD-AWS-0166/docs.md
new file mode 100644
index 000000000..cd7ff838f
--- /dev/null
+++ b/avd_docs/aws/iam/AVD-AWS-0166/docs.md
@@ -0,0 +1,15 @@
+
+
+Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.
+
+
+### Impact
+Leaving unused credentials active widens the scope for compromise.
+
+
+{{ remediationActions }}
+
+### Links
+- https://console.aws.amazon.com/iam/
+
+
diff --git a/avd_docs/aws/iam/AVD-AWS-0167/docs.md b/avd_docs/aws/iam/AVD-AWS-0167/docs.md
new file mode 100644
index 000000000..12d6f6cac
--- /dev/null
+++ b/avd_docs/aws/iam/AVD-AWS-0167/docs.md
@@ -0,0 +1,15 @@
+
+
+Multiple active access keys widen the scope for compromise.
+
+
+### Impact
+Widened scope for compromise.
+
+
+{{ remediationActions }}
+
+### Links
+- https://console.aws.amazon.com/iam/
+
+
diff --git a/avd_docs/aws/iam/AVD-AWS-0168/docs.md b/avd_docs/aws/iam/AVD-AWS-0168/docs.md
new file mode 100644
index 000000000..503c7d0ec
--- /dev/null
+++ b/avd_docs/aws/iam/AVD-AWS-0168/docs.md
@@ -0,0 +1,18 @@
+
+
+Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be
+deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can
+damage the credibility of the application/website behind the ELB. As a best practice, it is
+recommended to delete expired certificates.
+
+
+### Impact
+Risk of misconfiguration and damage to credibility
+
+
+{{ remediationActions }}
+
+### Links
+- https://console.aws.amazon.com/iam/
+
+
diff --git a/avd_docs/aws/iam/AVD-AWS-0169/docs.md b/avd_docs/aws/iam/AVD-AWS-0169/docs.md
new file mode 100644
index 000000000..e65af6230
--- /dev/null
+++ b/avd_docs/aws/iam/AVD-AWS-0169/docs.md
@@ -0,0 +1,16 @@
+
+
+By implementing least privilege for access control, an IAM role will require an appropriate
+IAM policy to allow Support Center access in order to manage incidents with AWS Support.
+
+
+### Impact
+Incident management is not possible without a support role.
+
+
+{{ remediationActions }}
+
+### Links
+- https://console.aws.amazon.com/iam/
+
+
diff --git a/avd_docs/aws/s3/AVD-AWS-0170/docs.md b/avd_docs/aws/s3/AVD-AWS-0170/docs.md
new file mode 100644
index 000000000..9b01e9f47
--- /dev/null
+++ b/avd_docs/aws/s3/AVD-AWS-0170/docs.md
@@ -0,0 +1,15 @@
+
+
+Adding MFA delete to an S3 bucket requires additional authentication when you change the version state of your bucket or you delete an object version, adding another layer of security in the event your security credentials are compromised or unauthorized access is obtained.
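For illustration, MFA delete is enabled alongside versioning. A minimal Terraform sketch (resource labels and the bucket name are placeholders):

```hcl
resource "aws_s3_bucket" "example" {
  bucket = "my-bucket"
}

resource "aws_s3_bucket_versioning" "example" {
  bucket = aws_s3_bucket.example.id

  versioning_configuration {
    status     = "Enabled"
    mfa_delete = "Enabled"
  }
}
```

Note that only the bucket owner (the root account) can enable MFA delete, so applying this change requires root credentials with an MFA device configured.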
+ + +### Impact +Lessened protection against accidental/malicious deletion of data + + +{{ remediationActions }} + +### Links +- https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html + + diff --git a/avd_docs/aws/s3/AVD-AWS-0171/docs.md b/avd_docs/aws/s3/AVD-AWS-0171/docs.md new file mode 100644 index 000000000..3ecf75235 --- /dev/null +++ b/avd_docs/aws/s3/AVD-AWS-0171/docs.md @@ -0,0 +1,15 @@ + + +Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events. + + +### Impact +Difficult/impossible to audit bucket object/data changes. + + +{{ remediationActions }} + +### Links +- https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html + + diff --git a/avd_docs/aws/s3/AVD-AWS-0172/docs.md b/avd_docs/aws/s3/AVD-AWS-0172/docs.md new file mode 100644 index 000000000..3ecf75235 --- /dev/null +++ b/avd_docs/aws/s3/AVD-AWS-0172/docs.md @@ -0,0 +1,15 @@ + + +Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events. + + +### Impact +Difficult/impossible to audit bucket object/data changes. + + +{{ remediationActions }} + +### Links +- https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html + + diff --git a/internal/adapters/cloud/aws/cloudtrail/adapt.go b/internal/adapters/cloud/aws/cloudtrail/adapt.go index 2199e402b..180c639aa 100644 --- a/internal/adapters/cloud/aws/cloudtrail/adapt.go +++ b/internal/adapters/cloud/aws/cloudtrail/adapt.go @@ -107,6 +107,39 @@ func (a *adapter) adaptTrail(info types.TrailInfo) (*cloudtrail.Trail, error) { isLogging = defsecTypes.Bool(*status.IsLogging, metadata) } + var eventSelectors []cloudtrail.EventSelector + if response.Trail.HasCustomEventSelectors != nil && *response.Trail.HasCustomEventSelectors { + output, err := a.client.GetEventSelectors(a.Context(), &api.GetEventSelectorsInput{ + TrailName: info.Name, + }) + if err != nil { + return nil, err + } + for _, eventSelector := range output.EventSelectors { + var resources []cloudtrail.DataResource + for _, dataResource := range eventSelector.DataResources { + typ := defsecTypes.StringDefault("", metadata) + if dataResource.Type != nil { + typ = defsecTypes.String(*dataResource.Type, metadata) + } + var values defsecTypes.StringValueList + for _, value := range dataResource.Values { + values = append(values, defsecTypes.String(value, metadata)) + } + resources = append(resources, cloudtrail.DataResource{ + Metadata: metadata, + Type: typ, + Values: values, + }) + } + eventSelectors = append(eventSelectors, cloudtrail.EventSelector{ + Metadata: metadata, + DataResources: resources, + ReadWriteType: defsecTypes.String(string(eventSelector.ReadWriteType), metadata), + }) + } + } + return &cloudtrail.Trail{ Metadata: metadata, Name: name, @@ -116,5 +149,6 @@ func (a *adapter) adaptTrail(info types.TrailInfo) (*cloudtrail.Trail, error) { KMSKeyID: defsecTypes.String(kmsKeyId, metadata), IsLogging: isLogging, BucketName: defsecTypes.String(bucketName, metadata), + EventSelectors: 
eventSelectors, }, nil } diff --git a/internal/adapters/cloud/aws/s3/s3.go b/internal/adapters/cloud/aws/s3/s3.go index 3f2d1a372..14d9abc5e 100644 --- a/internal/adapters/cloud/aws/s3/s3.go +++ b/internal/adapters/cloud/aws/s3/s3.go @@ -207,8 +207,9 @@ func (a *adapter) getBucketEncryption(bucketName *string, metadata defsecTypes.M func (a *adapter) getBucketVersioning(bucketName *string, metadata defsecTypes.Metadata) s3.Versioning { bucketVersioning := s3.Versioning{ - Metadata: metadata, - Enabled: defsecTypes.BoolDefault(false, metadata), + Metadata: metadata, + Enabled: defsecTypes.BoolDefault(false, metadata), + MFADelete: defsecTypes.BoolDefault(false, metadata), } versioning, err := a.api.GetBucketVersioning(a.Context(), &s3api.GetBucketVersioningInput{Bucket: bucketName}) @@ -227,6 +228,8 @@ func (a *adapter) getBucketVersioning(bucketName *string, metadata defsecTypes.M bucketVersioning.Enabled = defsecTypes.Bool(true, metadata) } + bucketVersioning.MFADelete = defsecTypes.Bool(versioning.MFADelete == s3types.MFADeleteStatusEnabled, metadata) + return bucketVersioning } diff --git a/internal/adapters/cloudformation/aws/s3/bucket.go b/internal/adapters/cloudformation/aws/s3/bucket.go index 96beece11..15ee37e14 100644 --- a/internal/adapters/cloudformation/aws/s3/bucket.go +++ b/internal/adapters/cloudformation/aws/s3/bucket.go @@ -23,8 +23,9 @@ func getBuckets(cfFile parser.FileContext) []s3.Bucket { PublicAccessBlock: getPublicAccessBlock(r), Encryption: getEncryption(r, cfFile), Versioning: s3.Versioning{ - Metadata: r.Metadata(), - Enabled: hasVersioning(r), + Metadata: r.Metadata(), + Enabled: hasVersioning(r), + MFADelete: defsecTypes.BoolUnresolvable(r.Metadata()), }, Logging: getLogging(r), ACL: convertAclValue(r.GetStringProperty("AccessControl", "private")), diff --git a/internal/adapters/terraform/aws/cloudtrail/adapt.go b/internal/adapters/terraform/aws/cloudtrail/adapt.go index eac234ade..0a9e8b0d5 100644 --- a/internal/adapters/terraform/aws/cloudtrail/adapt.go +++ b/internal/adapters/terraform/aws/cloudtrail/adapt.go @@ -35,6 +35,24 @@ func adaptTrail(resource *terraform.Block) cloudtrail.Trail { KMSKeyIDAttr := resource.GetAttribute("kms_key_id") KMSKeyIDVal := KMSKeyIDAttr.AsStringValueOrDefault("", resource) + var selectors []cloudtrail.EventSelector + for _, selBlock := range resource.GetBlocks("event_selector") { + var resources []cloudtrail.DataResource + for _, resBlock := range selBlock.GetBlocks("data_resource") { + resources = append(resources, cloudtrail.DataResource{ + Metadata: resBlock.GetMetadata(), + Type: resBlock.GetAttribute("type").AsStringValueOrDefault("", resBlock), + Values: resBlock.GetAttribute("values").AsStringValues(), + }) + } + selector := cloudtrail.EventSelector{ + Metadata: selBlock.GetMetadata(), + DataResources: resources, + ReadWriteType: selBlock.GetAttribute("read_write_type").AsStringValueOrDefault("All", selBlock), + } + selectors = append(selectors, selector) + } + return cloudtrail.Trail{ Metadata: resource.GetMetadata(), Name: nameVal, @@ -44,5 +62,6 @@ func adaptTrail(resource *terraform.Block) cloudtrail.Trail { CloudWatchLogsLogGroupArn: resource.GetAttribute("cloud_watch_logs_group_arn").AsStringValueOrDefault("", resource), IsLogging: resource.GetAttribute("enable_logging").AsBoolValueOrDefault(true, resource), BucketName: resource.GetAttribute("s3_bucket_name").AsStringValueOrDefault("", resource), + EventSelectors: selectors, } } diff --git a/internal/adapters/terraform/aws/s3/adapt_test.go 
b/internal/adapters/terraform/aws/s3/adapt_test.go index 9addecc1a..17f87c962 100644 --- a/internal/adapters/terraform/aws/s3/adapt_test.go +++ b/internal/adapters/terraform/aws/s3/adapt_test.go @@ -173,6 +173,7 @@ func Test_Adapt(t *testing.T) { bucket = aws_s3_bucket.example.id versioning_configuration { status = "Enabled" + mfa_delete = "Enabled" } } @@ -239,8 +240,9 @@ func Test_Adapt(t *testing.T) { KMSKeyId: defsecTypes.String("string-key", defsecTypes.NewTestMetadata()), }, Versioning: s3.Versioning{ - Metadata: defsecTypes.NewTestMetadata(), - Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMetadata()), + Metadata: defsecTypes.NewTestMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMetadata()), + MFADelete: defsecTypes.Bool(true, defsecTypes.NewTestMetadata()), }, Logging: s3.Logging{ Metadata: defsecTypes.NewTestMetadata(), diff --git a/internal/adapters/terraform/aws/s3/bucket.go b/internal/adapters/terraform/aws/s3/bucket.go index 291cc17a9..4b714ae77 100644 --- a/internal/adapters/terraform/aws/s3/bucket.go +++ b/internal/adapters/terraform/aws/s3/bucket.go @@ -81,11 +81,14 @@ func getEncryption(block *terraform.Block, a *adapter) s3.Encryption { } func getVersioning(block *terraform.Block, a *adapter) s3.Versioning { - if block.HasChild("versioning") { - return s3.Versioning{ - Metadata: block.GetMetadata(), - Enabled: isVersioned(block), - } + versioning := s3.Versioning{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), + MFADelete: defsecTypes.BoolDefault(false, block.GetMetadata()), + } + if vBlock := block.GetBlock("versioning"); vBlock != nil { + versioning.Enabled = vBlock.GetAttribute("enabled").AsBoolValueOrDefault(true, vBlock) + versioning.MFADelete = vBlock.GetAttribute("mfa_delete").AsBoolValueOrDefault(false, vBlock) } for _, versioningResource := range a.modules.GetResourcesByType("aws_s3_bucket_versioning") { bucketAttr := versioningResource.GetAttribute("bucket") @@ -93,27 +96,35 @@ func getVersioning(block *terraform.Block, a *adapter) s3.Versioning { if bucketAttr.IsString() { actualBucketName := block.GetAttribute("bucket").AsStringValueOrDefault("", block).Value() if bucketAttr.Equals(block.ID()) || bucketAttr.Equals(actualBucketName) { - return s3.Versioning{ - Metadata: versioningResource.GetMetadata(), - Enabled: isVersioned(versioningResource), - } + return getVersioningFromResource(versioningResource) } } if referencedBlock, err := a.modules.GetReferencedBlock(bucketAttr, versioningResource); err == nil { if referencedBlock.ID() == block.ID() { - return s3.Versioning{ - Metadata: versioningResource.GetMetadata(), - Enabled: isVersioned(versioningResource), - } + return getVersioningFromResource(versioningResource) } } } } + return versioning +} - return s3.Versioning{ - Metadata: block.GetMetadata(), - Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), +// from aws_s3_bucket_versioning +func getVersioningFromResource(block *terraform.Block) s3.Versioning { + versioning := s3.Versioning{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), + MFADelete: defsecTypes.BoolDefault(false, block.GetMetadata()), } + if config := block.GetBlock("versioning_configuration"); config != nil { + if status := config.GetAttribute("status"); status.IsNotNil() { + versioning.Enabled = defsecTypes.Bool(status.Equals("Enabled", terraform.IgnoreCase), status.GetMetadata()) + } + if mfa := config.GetAttribute("mfa_delete"); mfa.IsNotNil() { + 
versioning.MFADelete = defsecTypes.Bool(mfa.Equals("Enabled", terraform.IgnoreCase), mfa.GetMetadata()) + } + } + return versioning } func getLogging(block *terraform.Block, a *adapter) s3.Logging { @@ -222,21 +233,3 @@ func hasLogging(b *terraform.Block) defsecTypes.BoolValue { } return defsecTypes.BoolDefault(false, b.GetMetadata()) } - -func isVersioned(b *terraform.Block) defsecTypes.BoolValue { - if versioningBlock := b.GetBlock("versioning"); versioningBlock.IsNotNil() { - return versioningBlock.GetAttribute("enabled").AsBoolValueOrDefault(true, versioningBlock) - } - if versioningBlock := b.GetBlock("versioning_configuration"); versioningBlock.IsNotNil() { - status := versioningBlock.GetAttribute("status") - if status.Equals("Enabled", terraform.IgnoreCase) { - return defsecTypes.Bool(true, status.GetMetadata()) - } else { - return defsecTypes.Bool(false, b.GetMetadata()) - } - } - return defsecTypes.BoolDefault( - false, - b.GetMetadata(), - ) -} diff --git a/internal/adapters/terraform/google/iam/project_iam_test.go b/internal/adapters/terraform/google/iam/project_iam_test.go index 8fbb3f46e..6984fad50 100644 --- a/internal/adapters/terraform/google/iam/project_iam_test.go +++ b/internal/adapters/terraform/google/iam/project_iam_test.go @@ -11,33 +11,6 @@ import ( "github.com/aquasecurity/defsec/test/testutil" ) -func Test_AdaptMember(t *testing.T) { - t.SkipNow() - tests := []struct { - name string - terraform string - expected iam.Member - }{ - { - name: "basic", - terraform: ` -resource "" "example" { - -} -`, - expected: iam.Member{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") - adapted := AdaptMember(modules.GetBlocks()[0], modules) - testutil.AssertDefsecEqual(t, test.expected, adapted) - }) - } -} - func Test_AdaptBinding(t *testing.T) { tests := []struct { name string diff --git a/internal/rules/aws/s3/enable_object_read_logging.go b/internal/rules/aws/s3/enable_object_read_logging.go new file mode 100755 index 000000000..984f5921d --- /dev/null +++ b/internal/rules/aws/s3/enable_object_read_logging.go @@ -0,0 +1,85 @@ +package s3 + +import ( + "fmt" + + "github.com/aquasecurity/defsec/internal/rules" + "github.com/aquasecurity/defsec/pkg/framework" + "github.com/aquasecurity/defsec/pkg/providers" + "github.com/aquasecurity/defsec/pkg/scan" + "github.com/aquasecurity/defsec/pkg/severity" + "github.com/aquasecurity/defsec/pkg/state" +) + +var CheckEnableObjectReadLogging = rules.Register( + scan.Rule{ + AVDID: "AVD-AWS-0172", + Provider: providers.AWSProvider, + Service: "s3", + ShortCode: "enable-object-read-logging", + Frameworks: map[framework.Framework][]string{ + framework.CIS_AWS_1_4: {"3.11"}, + }, + Summary: "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.", + Impact: "Difficult/impossible to audit bucket object/data changes.", + Resolution: "Enable Object-level logging for S3 buckets.", + Explanation: ` +Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events. 
+`, + Links: []string{ + "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html", + }, + Severity: severity.Low, + Terraform: &scan.EngineMetadata{ + GoodExamples: terraformEnableObjectReadLoggingGoodExamples, + BadExamples: terraformEnableObjectReadLoggingBadExamples, + Links: terraformEnableObjectReadLoggingLinks, + RemediationMarkdown: terraformEnableObjectReadLoggingRemediationMarkdown, + }, + }, + func(s *state.State) (results scan.Results) { + for _, bucket := range s.AWS.S3.Buckets { + if !bucket.Name.GetMetadata().IsResolvable() { + continue + } + bucketName := bucket.Name.Value() + var hasReadLogging bool + for _, trail := range s.AWS.CloudTrail.Trails { + for _, selector := range trail.EventSelectors { + if selector.ReadWriteType.EqualTo("WriteOnly") { + continue + } + for _, dataResource := range selector.DataResources { + if dataResource.Type.NotEqualTo("AWS::S3::Object") { + continue + } + for _, partialARN := range dataResource.Values { + partial := partialARN.Value() + if partial == "arn:aws:s3" { // logging for all of s3 is enabled + hasReadLogging = true + break + } + // the slash is important as it enables logging for objects inside bucket + if partial == fmt.Sprintf("arn:aws:s3:::%s/", bucketName) { + hasReadLogging = true + break + } + } + } + } + if hasReadLogging { + break + } + } + if !hasReadLogging { + results.Add( + "Bucket does not have object-level read logging enabled", + &bucket, + ) + } else { + results.AddPassed(&bucket) + } + } + return results + }, +) diff --git a/internal/rules/aws/s3/enable_object_read_logging.tf.go b/internal/rules/aws/s3/enable_object_read_logging.tf.go new file mode 100644 index 000000000..a36d955fb --- /dev/null +++ b/internal/rules/aws/s3/enable_object_read_logging.tf.go @@ -0,0 +1,34 @@ +package s3 + +var terraformEnableObjectReadLoggingGoodExamples = []string{ + ` +resource "aws_s3_bucket" "good_example" { + bucket = "my-bucket" +} + +resource "aws_cloudtrail" "example" { + event_selector { + read_write_type = "ReadOnly" # or "All" + data_resource { + type = "AWS::S3::Object" + values = ["arn:aws:s3:::${aws_s3_bucket.good_example.bucket}/"] + } + } +} + +`, +} + +var terraformEnableObjectReadLoggingBadExamples = []string{ + ` +resource "aws_s3_bucket" "bad_example" { + bucket = "my-bucket" +} +`, +} + +var terraformEnableObjectReadLoggingLinks = []string{ + `https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket#versioning`, +} + +var terraformEnableObjectReadLoggingRemediationMarkdown = `` diff --git a/internal/rules/aws/s3/enable_object_read_logging_test.go b/internal/rules/aws/s3/enable_object_read_logging_test.go new file mode 100644 index 000000000..61b8c17fb --- /dev/null +++ b/internal/rules/aws/s3/enable_object_read_logging_test.go @@ -0,0 +1,260 @@ +package s3 + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/defsec/pkg/types" + + "github.com/aquasecurity/defsec/pkg/state" + + "github.com/aquasecurity/defsec/pkg/providers/aws/cloudtrail" + "github.com/aquasecurity/defsec/pkg/providers/aws/s3" + "github.com/aquasecurity/defsec/pkg/scan" + + "github.com/stretchr/testify/assert" +) + +func TestCheckEnableObjectReadLogging(t *testing.T) { + tests := []struct { + name string + s3 s3.S3 + cloudtrail cloudtrail.CloudTrail + expected bool + }{ + { + name: "S3 bucket with no cloudtrail logging", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", 
defsecTypes.NewTestMetadata()), + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with WriteOnly cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("WriteOnly", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with ReadOnly cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("ReadOnly", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (only this bucket)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket/", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + 
name: "S3 bucket with 'All' cloudtrail logging (only another bucket)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket2/", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (this bucket, missing slash)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var testState state.State + testState.AWS.S3 = test.s3 + testState.AWS.CloudTrail = test.cloudtrail + results := CheckEnableObjectReadLogging.Evaluate(&testState) + var found bool + for _, result := range results { + if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableObjectReadLogging.Rule().LongID() { + found = true + } + } + if test.expected { + assert.True(t, found, "Rule should have been found") + } else { + assert.False(t, found, "Rule should not have been found") + } + }) + } +} diff --git a/internal/rules/aws/s3/enable_object_write_logging.go b/internal/rules/aws/s3/enable_object_write_logging.go new file mode 100755 index 000000000..52f6c6c89 --- /dev/null +++ b/internal/rules/aws/s3/enable_object_write_logging.go @@ -0,0 +1,85 @@ +package s3 + +import ( + "fmt" + + "github.com/aquasecurity/defsec/internal/rules" + "github.com/aquasecurity/defsec/pkg/framework" + "github.com/aquasecurity/defsec/pkg/providers" + "github.com/aquasecurity/defsec/pkg/scan" + "github.com/aquasecurity/defsec/pkg/severity" + "github.com/aquasecurity/defsec/pkg/state" +) + +var CheckEnableObjectWriteLogging = rules.Register( + scan.Rule{ + AVDID: "AVD-AWS-0171", + Provider: providers.AWSProvider, + Service: "s3", + ShortCode: "enable-object-write-logging", + Frameworks: map[framework.Framework][]string{ + framework.CIS_AWS_1_4: {"3.10"}, + }, + Summary: "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. 
By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.", + Impact: "Difficult/impossible to audit bucket object/data changes.", + Resolution: "Enable Object-level logging for S3 buckets.", + Explanation: ` +Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events. +`, + Links: []string{ + "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html", + }, + Severity: severity.Low, + Terraform: &scan.EngineMetadata{ + GoodExamples: terraformEnableObjectWriteLoggingGoodExamples, + BadExamples: terraformEnableObjectWriteLoggingBadExamples, + Links: terraformEnableObjectWriteLoggingLinks, + RemediationMarkdown: terraformEnableObjectWriteLoggingRemediationMarkdown, + }, + }, + func(s *state.State) (results scan.Results) { + for _, bucket := range s.AWS.S3.Buckets { + if !bucket.Name.GetMetadata().IsResolvable() { + continue + } + bucketName := bucket.Name.Value() + var hasWriteLogging bool + for _, trail := range s.AWS.CloudTrail.Trails { + for _, selector := range trail.EventSelectors { + if selector.ReadWriteType.EqualTo("ReadOnly") { + continue + } + for _, dataResource := range selector.DataResources { + if dataResource.Type.NotEqualTo("AWS::S3::Object") { + continue + } + for _, partialARN := range dataResource.Values { + partial := partialARN.Value() + if partial == "arn:aws:s3" { // logging for all of s3 is enabled + hasWriteLogging = true + break + } + // the slash is important as it enables logging for objects inside bucket + if partial == fmt.Sprintf("arn:aws:s3:::%s/", bucketName) { + hasWriteLogging = true + break + } + } + } + } + if hasWriteLogging { + break + } + } + if !hasWriteLogging { + results.Add( + "Bucket does not have object-level write logging enabled", + &bucket, + ) + } else { + results.AddPassed(&bucket) + } + } + return results + }, +) diff --git a/internal/rules/aws/s3/enable_object_write_logging.tf.go b/internal/rules/aws/s3/enable_object_write_logging.tf.go new file mode 100644 index 000000000..72f3389ce --- /dev/null +++ b/internal/rules/aws/s3/enable_object_write_logging.tf.go @@ -0,0 +1,33 @@ +package s3 + +var terraformEnableObjectWriteLoggingGoodExamples = []string{ + ` +resource "aws_s3_bucket" "good_example" { + bucket = "my-bucket" +} + +resource "aws_cloudtrail" "example" { + event_selector { + read_write_type = "WriteOnly" # or "All" + data_resource { + type = "AWS::S3::Object" + values = ["arn:aws:s3:::${aws_s3_bucket.good_example.bucket}/"] + } + } +} +`, +} + +var terraformEnableObjectWriteLoggingBadExamples = []string{ + ` +resource "aws_s3_bucket" "bad_example" { + bucket = "my-bucket" +} +`, +} + +var terraformEnableObjectWriteLoggingLinks = []string{ + `https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket#versioning`, +} + +var terraformEnableObjectWriteLoggingRemediationMarkdown = `` diff --git a/internal/rules/aws/s3/enable_object_write_logging_test.go b/internal/rules/aws/s3/enable_object_write_logging_test.go new file mode 100644 index 000000000..5381090c6 --- /dev/null +++ b/internal/rules/aws/s3/enable_object_write_logging_test.go @@ -0,0 +1,260 @@ +package s3 + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/defsec/pkg/types" + + 
"github.com/aquasecurity/defsec/pkg/state" + + "github.com/aquasecurity/defsec/pkg/providers/aws/cloudtrail" + "github.com/aquasecurity/defsec/pkg/providers/aws/s3" + "github.com/aquasecurity/defsec/pkg/scan" + + "github.com/stretchr/testify/assert" +) + +func TestCheckEnableObjectWriteLogging(t *testing.T) { + tests := []struct { + name string + s3 s3.S3 + cloudtrail cloudtrail.CloudTrail + expected bool + }{ + { + name: "S3 bucket with no cloudtrail logging", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with ReadOnly cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("ReadOnly", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with WriteOnly cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("WriteOnly", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (all of s3)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (only this bucket)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ 
+ Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket/", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (only another bucket)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket2/", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "S3 bucket with 'All' cloudtrail logging (this bucket, missing slash)", + s3: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMetadata(), + Name: defsecTypes.String("test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + cloudtrail: cloudtrail.CloudTrail{ + Trails: []cloudtrail.Trail{ + { + Metadata: defsecTypes.NewTestMetadata(), + EventSelectors: []cloudtrail.EventSelector{ + { + Metadata: defsecTypes.NewTestMetadata(), + ReadWriteType: defsecTypes.String("All", defsecTypes.NewTestMetadata()), + DataResources: []cloudtrail.DataResource{ + { + Metadata: defsecTypes.NewTestMetadata(), + Type: defsecTypes.String("AWS::S3::Object", defsecTypes.NewTestMetadata()), + Values: []defsecTypes.StringValue{ + defsecTypes.String("arn:aws:s3:::test-bucket", defsecTypes.NewTestMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var testState state.State + testState.AWS.S3 = test.s3 + testState.AWS.CloudTrail = test.cloudtrail + results := CheckEnableObjectWriteLogging.Evaluate(&testState) + var found bool + for _, result := range results { + if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableObjectWriteLogging.Rule().LongID() { + found = true + } + } + if test.expected { + assert.True(t, found, "Rule should have been found") + } else { + assert.False(t, found, "Rule should not have been found") + } + }) + } +} diff --git a/internal/rules/aws/s3/require_mfa_delete.go b/internal/rules/aws/s3/require_mfa_delete.go new file mode 100755 index 000000000..b6d931542 --- /dev/null +++ b/internal/rules/aws/s3/require_mfa_delete.go @@ -0,0 +1,51 @@ +package s3 + +import ( + "github.com/aquasecurity/defsec/internal/rules" + "github.com/aquasecurity/defsec/pkg/framework" + "github.com/aquasecurity/defsec/pkg/providers" + "github.com/aquasecurity/defsec/pkg/scan" + "github.com/aquasecurity/defsec/pkg/severity" + "github.com/aquasecurity/defsec/pkg/state" +) + +var 
CheckRequireMFADelete = rules.Register(
+	scan.Rule{
+		AVDID:     "AVD-AWS-0170",
+		Provider:  providers.AWSProvider,
+		Service:   "s3",
+		ShortCode: "require-mfa-delete",
+		Frameworks: map[framework.Framework][]string{
+			framework.CIS_AWS_1_4: {"2.1.3"},
+		},
+		Summary:    "Buckets should have MFA deletion protection enabled.",
+		Impact:     "Lessened protection against accidental/malicious deletion of data",
+		Resolution: "Enable MFA deletion protection on the bucket",
+		Explanation: `
+Adding MFA delete to an S3 bucket requires additional authentication when you change the version state of your bucket or you delete an object version, adding another layer of security in the event your security credentials are compromised or unauthorized access is obtained.
+`,
+		Links: []string{
+			"https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html",
+		},
+		Severity: severity.Low,
+		Terraform: &scan.EngineMetadata{
+			GoodExamples:        terraformRequireMFADeleteGoodExamples,
+			BadExamples:         terraformRequireMFADeleteBadExamples,
+			Links:               terraformRequireMFADeleteLinks,
+			RemediationMarkdown: terraformRequireMFADeleteRemediationMarkdown,
+		},
+	},
+	func(s *state.State) (results scan.Results) {
+		for _, bucket := range s.AWS.S3.Buckets {
+			if bucket.Versioning.MFADelete.IsFalse() {
+				results.Add(
+					"Bucket does not have MFA deletion protection enabled",
+					bucket.Versioning.MFADelete,
+				)
+			} else {
+				results.AddPassed(&bucket)
+			}
+		}
+		return results
+	},
+)
diff --git a/internal/rules/aws/s3/require_mfa_delete.tf.go b/internal/rules/aws/s3/require_mfa_delete.tf.go
new file mode 100644
index 000000000..d1a5e6e8e
--- /dev/null
+++ b/internal/rules/aws/s3/require_mfa_delete.tf.go
@@ -0,0 +1,41 @@
+package s3
+
+var terraformRequireMFADeleteGoodExamples = []string{
+	`
+resource "aws_s3_bucket" "example" {
+  bucket = "bucket"
+}
+
+resource "aws_s3_bucket_versioning" "good_example" {
+  bucket = aws_s3_bucket.example.id
+
+  versioning_configuration {
+    status     = "Enabled"
+    mfa_delete = "Enabled"
+  }
+}
+ `,
+}
+
+var terraformRequireMFADeleteBadExamples = []string{
+	`
+resource "aws_s3_bucket" "example" {
+  bucket = "bucket"
+}
+
+resource "aws_s3_bucket_versioning" "bad_example" {
+  bucket = aws_s3_bucket.example.id
+
+  versioning_configuration {
+    status     = "Enabled"
+    mfa_delete = "Disabled"
+  }
+}
+ `,
+}
+
+var terraformRequireMFADeleteLinks = []string{
+	`https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning`,
+}
+
+var terraformRequireMFADeleteRemediationMarkdown = ``
diff --git a/internal/rules/aws/s3/require_mfa_delete_test.go b/internal/rules/aws/s3/require_mfa_delete_test.go
new file mode 100644
index 000000000..f17ef15e8
--- /dev/null
+++ b/internal/rules/aws/s3/require_mfa_delete_test.go
@@ -0,0 +1,89 @@
+package s3
+
+import (
+	"testing"
+
+	defsecTypes "github.com/aquasecurity/defsec/pkg/types"
+
+	"github.com/aquasecurity/defsec/pkg/state"
+
+	"github.com/aquasecurity/defsec/pkg/providers/aws/s3"
+	"github.com/aquasecurity/defsec/pkg/scan"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCheckRequireMFADelete(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    s3.S3
+		expected bool
+	}{
+		{
+			name: "RequireMFADelete is not set",
+			input: s3.S3{
+				Buckets: []s3.Bucket{
+					{
+						Metadata: defsecTypes.NewTestMetadata(),
+						Versioning: s3.Versioning{
+							Metadata:  defsecTypes.NewTestMetadata(),
+							Enabled:   defsecTypes.Bool(true, defsecTypes.NewTestMetadata()),
+							MFADelete: defsecTypes.BoolUnresolvable(defsecTypes.NewTestMetadata()),
+						},
+					},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "RequireMFADelete is false",
+			input: s3.S3{
+				Buckets: []s3.Bucket{
+					{
+						Metadata: defsecTypes.NewTestMetadata(),
+						Versioning: s3.Versioning{
+							Metadata:  defsecTypes.NewTestMetadata(),
+							Enabled:   defsecTypes.Bool(true, defsecTypes.NewTestMetadata()),
+							MFADelete: defsecTypes.Bool(false, defsecTypes.NewTestMetadata()),
+						},
+					},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "RequireMFADelete is true",
+			input: s3.S3{
+				Buckets: []s3.Bucket{
+					{
+						Metadata: defsecTypes.NewTestMetadata(),
+						Versioning: s3.Versioning{
+							Metadata:  defsecTypes.NewTestMetadata(),
+							Enabled:   defsecTypes.Bool(true, defsecTypes.NewTestMetadata()),
+							MFADelete: defsecTypes.Bool(true, defsecTypes.NewTestMetadata()),
+						},
+					},
+				},
+			},
+			expected: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var testState state.State
+			testState.AWS.S3 = test.input
+			results := CheckRequireMFADelete.Evaluate(&testState)
+			var found bool
+			for _, result := range results {
+				if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckRequireMFADelete.Rule().LongID() {
+					found = true
+				}
+			}
+			if test.expected {
+				assert.True(t, found, "Rule should have been found")
+			} else {
+				assert.False(t, found, "Rule should not have been found")
+			}
+		})
+	}
+}
diff --git a/pkg/providers/aws/cloudtrail/cloudtrail.go b/pkg/providers/aws/cloudtrail/cloudtrail.go
index 0105eba3d..996b1a70a 100755
--- a/pkg/providers/aws/cloudtrail/cloudtrail.go
+++ b/pkg/providers/aws/cloudtrail/cloudtrail.go
@@ -26,4 +26,17 @@ type Trail struct {
 	CloudWatchLogsLogGroupArn defsecTypes.StringValue
 	IsLogging                 defsecTypes.BoolValue
 	BucketName                defsecTypes.StringValue
+	EventSelectors            []EventSelector
+}
+
+type EventSelector struct {
+	defsecTypes.Metadata
+	DataResources []DataResource
+	ReadWriteType defsecTypes.StringValue // ReadOnly, WriteOnly, All. Default value is All for TF.
+}
+
+type DataResource struct {
+	defsecTypes.Metadata
+	Type   defsecTypes.StringValue     // You can specify only the following values: "AWS::S3::Object", "AWS::Lambda::Function" and "AWS::DynamoDB::Table".
+	Values defsecTypes.StringValueList // List of ARNs/partial ARNs - e.g. arn:aws:s3:::<bucket_name>/ for all objects in a bucket, arn:aws:s3:::<bucket_name>/key for specific objects
 }
diff --git a/pkg/providers/aws/s3/bucket.go b/pkg/providers/aws/s3/bucket.go
index eafb53165..26072870d 100755
--- a/pkg/providers/aws/s3/bucket.go
+++ b/pkg/providers/aws/s3/bucket.go
@@ -37,7 +37,8 @@ type Logging struct {
 
 type Versioning struct {
 	defsecTypes.Metadata
-	Enabled defsecTypes.BoolValue
+	Enabled   defsecTypes.BoolValue
+	MFADelete defsecTypes.BoolValue
 }
 
 type Encryption struct {
diff --git a/pkg/scanners/cloudformation/scanner.go b/pkg/scanners/cloudformation/scanner.go
index fc2d95706..02e9da568 100644
--- a/pkg/scanners/cloudformation/scanner.go
+++ b/pkg/scanners/cloudformation/scanner.go
@@ -195,8 +195,7 @@ func (s *Scanner) scanFileContext(ctx context.Context, regoScanner *rego.Scanner
 			ref = scanResult.Metadata().Parent().Reference()
 		}
-		reference := ref.(*parser.CFReference)
-		description := getDescription(scanResult, reference)
+		description := getDescription(scanResult, ref)
 		scanResult.OverrideDescription(description)
 		results = append(results, scanResult)
 	}
@@ -215,7 +214,11 @@ func (s *Scanner) scanFileContext(ctx context.Context, regoScanner *rego.Scanner
 	return append(results, regoResults...), nil
 }
 
-func getDescription(scanResult scan.Result, location *parser.CFReference) string {
+type logicalIDProvider interface {
+	LogicalID() string
+}
+
+func getDescription(scanResult scan.Result, location logicalIDProvider) string {
 	switch scanResult.Status() {
 	case scan.StatusPassed:
 		return fmt.Sprintf("Resource '%s' passed check: %s", location.LogicalID(), scanResult.Rule().Summary)
diff --git a/pkg/scanners/terraform/scanner.go b/pkg/scanners/terraform/scanner.go
index cdf68d02f..815dbf4cf 100644
--- a/pkg/scanners/terraform/scanner.go
+++ b/pkg/scanners/terraform/scanner.go
@@ -176,7 +176,7 @@ func (s *Scanner) ScanFSWithMetrics(ctx context.Context, target fs.FS, dir strin
 	}
 
 	s.execLock.Lock()
-	s.executorOpt = append(s.executorOpt, executor.OptionWithRegoScanner(regoScanner))
+	s.executorOpt = append(s.executorOpt, executor.OptionWithRegoScanner(regoScanner), executor.OptionWithFrameworks(s.frameworks...))
 	s.execLock.Unlock()
 
 	var allResults scan.Results
diff --git a/test/rules_test.go b/test/rules_test.go
index 64a09343b..d841b80eb 100644
--- a/test/rules_test.go
+++ b/test/rules_test.go
@@ -7,14 +7,12 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/aquasecurity/defsec/pkg/framework"
-
 	"github.com/stretchr/testify/assert"
-
 	"github.com/stretchr/testify/require"
 
 	"github.com/aquasecurity/defsec/internal/rules"
-
+	"github.com/aquasecurity/defsec/pkg/framework"
+	"github.com/aquasecurity/defsec/pkg/scanners/options"
 	"github.com/aquasecurity/defsec/test/testutil"
 )
@@ -35,7 +33,7 @@ func TestAVDIDs(t *testing.T) {
 }
 
 func TestRulesAgainstExampleCode(t *testing.T) {
-	for _, rule := range rules.GetFrameworkRules() {
+	for _, rule := range rules.GetFrameworkRules(framework.ALL) {
 		testName := fmt.Sprintf("%s/%s", rule.Rule().AVDID, rule.Rule().LongID())
 		t.Run(testName, func(t *testing.T) {
 			rule := rule
@@ -52,7 +50,7 @@ func TestRulesAgainstExampleCode(t *testing.T) {
 			t.Run("terraform: good examples", func(t *testing.T) {
 				for i, example := range rule.Rule().Terraform.GoodExamples {
 					t.Run(fmt.Sprintf("example %d", i), func(t *testing.T) {
-						results := scanHCL(t, example)
+						results := scanHCL(t, example, options.ScannerWithFrameworks(framework.ALL))
 						testutil.AssertRuleNotFound(t, rule.Rule().LongID(), results, "Rule %s was detected in good example #%d:\n%s",
rule.Rule().LongID(), i, example) }) } @@ -60,12 +58,14 @@ func TestRulesAgainstExampleCode(t *testing.T) { t.Run("terraform: bad examples", func(t *testing.T) { for i, example := range rule.Rule().Terraform.BadExamples { t.Run(fmt.Sprintf("example %d", i), func(t *testing.T) { - results := scanHCL(t, example) + results := scanHCL(t, example, options.ScannerWithFrameworks(framework.ALL)) testutil.AssertRuleFound(t, rule.Rule().LongID(), results, "Rule %s was not detected in bad example #%d:\n%s", rule.Rule().LongID(), i, example) for _, result := range results.GetFailed() { - code, err := result.GetCode() - require.NoError(t, err) - assert.Greater(t, len(code.Lines), 0) + if result.Rule().LongID() == rule.Rule().LongID() { + code, err := result.GetCode() + require.NoError(t, err) + assert.Greater(t, len(code.Lines), 0) + } } }) } @@ -75,7 +75,7 @@ func TestRulesAgainstExampleCode(t *testing.T) { t.Run("cloudformation: good examples", func(t *testing.T) { for i, example := range rule.Rule().CloudFormation.GoodExamples { t.Run(fmt.Sprintf("example %d", i), func(t *testing.T) { - results := scanCF(t, example) + results := scanCF(t, example, options.ScannerWithFrameworks(framework.ALL)) testutil.AssertRuleNotFound(t, rule.Rule().LongID(), results, "Rule %s was detected in good example #%d:\n%s", rule.Rule().LongID(), i, example) }) } @@ -83,12 +83,14 @@ func TestRulesAgainstExampleCode(t *testing.T) { t.Run("cloudformation: bad examples", func(t *testing.T) { for i, example := range rule.Rule().CloudFormation.BadExamples { t.Run(fmt.Sprintf("example %d", i), func(t *testing.T) { - results := scanCF(t, example) + results := scanCF(t, example, options.ScannerWithFrameworks(framework.ALL)) testutil.AssertRuleFound(t, rule.Rule().LongID(), results, "Rule %s was not detected in bad example #%d:\n%s", rule.Rule().LongID(), i, example) for _, result := range results.GetFailed() { - code, err := result.GetCode() - require.NoError(t, err) - assert.Greater(t, len(code.Lines), 0) + if result.Rule().LongID() == rule.Rule().LongID() { + code, err := result.GetCode() + require.NoError(t, err) + assert.Greater(t, len(code.Lines), 0) + } } }) }
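Taken together, the two object-level logging checks above pass for a bucket when some trail has an event selector covering the relevant data events. A minimal Terraform sketch (names are placeholders; required trail arguments such as `name` and `s3_bucket_name` are omitted for brevity, as in the rule examples):

```hcl
resource "aws_s3_bucket" "example" {
  bucket = "my-bucket"
}

resource "aws_cloudtrail" "example" {
  # "All" covers both read and write data events, satisfying
  # enable-object-read-logging and enable-object-write-logging.
  event_selector {
    read_write_type = "All"

    data_resource {
      type = "AWS::S3::Object"
      # The trailing slash scopes logging to the objects inside the
      # bucket; a bare "arn:aws:s3" would enable logging for all of S3.
      values = ["arn:aws:s3:::${aws_s3_bucket.example.bucket}/"]
    }
  }
}
```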