diff --git a/.ci/jobs/docker-compose.yml b/.ci/jobs/docker-compose.yml new file mode 100644 index 000000000000..e9fc43ff7046 --- /dev/null +++ b/.ci/jobs/docker-compose.yml @@ -0,0 +1,23 @@ +version: '2.3' +services: + # This is a proxy used to block beats until all services are healthy. + # See: https://github.com/docker/compose/issues/4369 + proxy_dep: + image: busybox + depends_on: + localstack: { condition: service_healthy } + + localstack: + container_name: "${localstack_integration_test_container}" + image: localstack/localstack:2.1.0 # Latest stable release + ports: + - "127.0.0.1:4566:4566" # LocalStack Gateway + environment: + - DEBUG=1 + - DOCKER_HOST=unix:///var/run/docker.sock + - LOCALSTACK_HOST=localhost + - S3_HOSTNAME=localhost + - PROVIDER_OVERRIDE_S3=asf + volumes: + - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack" + - "/var/run/docker.sock:/var/run/docker.sock" diff --git a/.ci/scripts/docker-services-cleanup.sh b/.ci/scripts/docker-services-cleanup.sh new file mode 100755 index 000000000000..cc182413a2e6 --- /dev/null +++ b/.ci/scripts/docker-services-cleanup.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -exuo pipefail + +${HOME}/bin/docker-compose -f .ci/jobs/docker-compose.yml down -v + +exit $? diff --git a/.ci/scripts/install-docker-services.sh b/.ci/scripts/install-docker-services.sh new file mode 100755 index 000000000000..420362f83557 --- /dev/null +++ b/.ci/scripts/install-docker-services.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -exuo pipefail + +${HOME}/bin/docker-compose -f .ci/jobs/docker-compose.yml up -d + +exit $? diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 49595dea612b..59d25cec04a2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -356,6 +356,7 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Add `clean_session` configuration setting for MQTT input. 
{pull}35806[16204] - Add fingerprint mode for the filestream scanner and new file identity based on it {issue}34419[34419] {pull}35734[35734] - Add file system metadata to events ingested via filestream {issue}35801[35801] {pull}36065[36065] +- Add support for localstack based input integration testing {pull}35727[35727] - Allow parsing bytes in and bytes out as long integer in CEF processor. {issue}36100[36100] {pull}36108[36108] - Add support for registered owners and users to AzureAD entity analytics provider. {pull}36092[36092] diff --git a/Jenkinsfile b/Jenkinsfile index cdee0f662cdb..a0231b94957c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -618,7 +618,7 @@ def targetWithoutNode(Map args = [:]) { cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") } } else { - cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") + cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") } } } @@ -934,6 +934,8 @@ def startCloudTestEnv(Map args = [:]) { stage("${name}-prepare-cloud-env"){ withBeatsEnv(archive: false, withModule: false) { try { + // Run the docker services to setup the emulated cloud environment + sh(label: 'Run docker-compose services for emulated cloud env', script: ".ci/scripts/install-docker-services.sh ", returnStatus: true) dirs?.each { folder -> retryWithSleep(retries: 2, seconds: 5, backoff: true){ terraformApply(folder) @@ -944,9 +946,12 @@ def startCloudTestEnv(Map args = [:]) { // If it failed then cleanup without failing the build sh(label: 'Terraform Cleanup', script: ".ci/scripts/terraform-cleanup.sh ${folder}", returnStatus: true) } + // Cleanup the docker services + sh(label: 'Docker Compose Cleanup', script: ".ci/scripts/docker-services-cleanup.sh", returnStatus: true) + error('startCloudTestEnv: terraform apply failed.') } finally { - dirs?.each { folder -> + dirs?.each { folder -> // Archive terraform states in case 
manual cleanup is needed. archiveArtifacts(allowEmptyArchive: true, artifacts: '**/terraform.tfstate') dir("${folder}") { @@ -978,6 +983,7 @@ def terraformApply(String directory) { * Tear down the terraform environments, by looking for all terraform states in directory * then it runs terraform destroy for each one. * It uses terraform states previously stashed by startCloudTestEnv. +* This also tears down any associated docker services */ def terraformCleanup(Map args = [:]) { String name = normalise(args.name) @@ -988,6 +994,8 @@ def terraformCleanup(Map args = [:]) { retryWithSleep(retries: 2, seconds: 5, backoff: true) { sh(label: "Terraform Cleanup", script: ".ci/scripts/terraform-cleanup.sh ${directory}") } + // Cleanup associated docker services + sh(label: 'Docker Compose Cleanup', script: ".ci/scripts/docker-services-cleanup.sh") } } } diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore b/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore index 0825744a7760..1af7b09a151d 100644 --- a/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore +++ b/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore @@ -1,3 +1,3 @@ terraform/ -outputs.yml +outputs*.yml *.tfstate* diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/README.md b/x-pack/filebeat/input/awss3/_meta/terraform/README.md index d5614b99a92e..41100d98dad4 100644 --- a/x-pack/filebeat/input/awss3/_meta/terraform/README.md +++ b/x-pack/filebeat/input/awss3/_meta/terraform/README.md @@ -5,6 +5,8 @@ for executing the integration tests for the `aws-s3` Filebeat input. It creates an S3 bucket and SQS queue and configures S3 `ObjectCreated:*` notifications to be delivered to SQS. It also creates a second S3 bucket, SNS topic, SQS queue and configures S3 `ObjectCreated:*` notifications to be delivered to SNS and also creates a subscription for this SNS topic to SQS queue to automatically place messages sent to SNS topic in SQS queue. 
+## Cloud AWS environment + It outputs configuration information that is consumed by the tests to `outputs.yml`. The AWS resources are randomly named to prevent name collisions between multiple users. @@ -42,4 +44,40 @@ the S3 bucket and its contents. `terraform destroy` +## Emulated cloud Localstack environment + +It outputs configuration information that is consumed by the tests to +`outputs-localstack.yml`. The AWS resources are randomly named to prevent name collisions +between multiple users. + +### Usage + +You must have the appropriate Localstack environment up and running in docker. +You can use `.ci/jobs/docker-compose.yml` to spin up the Localstack environment. + +1. Execute terraform in this directory to create the resources. This will also +write the `outputs-localstack.yml`. You can use `export TF_VAR_aws_region=NNNNN` in order +to match the AWS region of the profile you are using. + + `terraform apply` + + +2. (Optional) View the output configuration. + + ```yaml + "aws_region": "us-east-1" + "bucket_name": "filebeat-s3-integtest-8iok1h" + "queue_url": "https://localhost:4566/000000000000/filebeat-s3-integtest-8iok1h" + ``` + +3. Execute the integration test. + + ``` + cd x-pack/filebeat/input/awss3 + go test -tags aws,integration -run TestInputRun*Localstack* -v . + ``` + +4. Cleanup AWS resources. Execute terraform to remove the SQS queue and delete +the S3 bucket and its contents. 
+ `terraform destroy` \ No newline at end of file diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf b/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf new file mode 100644 index 000000000000..2d2fcbe09ecf --- /dev/null +++ b/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf @@ -0,0 +1,89 @@ +provider "aws" { + alias = "localstack" + access_key = "bharat" + secret_key = "bharat" + region = "us-east-1" + s3_use_path_style = true + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true + + endpoints { + apigateway = "http://localhost:4566" + apigatewayv2 = "http://localhost:4566" + cloudformation = "http://localhost:4566" + cloudwatch = "http://localhost:4566" + dynamodb = "http://localhost:4566" + ec2 = "http://localhost:4566" + es = "http://localhost:4566" + elasticache = "http://localhost:4566" + firehose = "http://localhost:4566" + iam = "http://localhost:4566" + kinesis = "http://localhost:4566" + lambda = "http://localhost:4566" + rds = "http://localhost:4566" + redshift = "http://localhost:4566" + route53 = "http://localhost:4566" + s3 = "http://localhost:4566" + secretsmanager = "http://localhost:4566" + ses = "http://localhost:4566" + sns = "http://localhost:4566" + sqs = "http://localhost:4566" + ssm = "http://localhost:4566" + stepfunctions = "http://localhost:4566" + sts = "http://localhost:4566" + } +} + +resource "random_string" "random_localstack" { + length = 6 + special = false + upper = false +} + +resource "aws_s3_bucket" "filebeat-integtest-localstack" { + provider = aws.localstack + bucket = "filebeat-s3-integtest-localstack-${random_string.random_localstack.result}" + force_destroy = true +} + +resource "aws_sqs_queue" "filebeat-integtest-localstack" { + provider = aws.localstack + name = "filebeat-sqs-integtest-localstack-${random_string.random_localstack.result}" + policy = <