diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8a344ecdd6f96..469b1f99d9747 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -15,7 +15,7 @@ experimental:
templates:
job_template: &job_template
docker:
- - image: gcr.io/datadoghq/agent-circleci-runner:v32015963-09a32ddb
+ - image: gcr.io/datadoghq/agent-circleci-runner:v47817366-5fdf0325
environment:
USE_SYSTEM_LIBS: "1"
working_directory: /go/src/github.com/DataDog/datadog-agent
@@ -245,18 +245,6 @@ jobs:
name: build agent
command: inv -e agent.build --exclude-rtloader
- build_iot_agent:
- <<: *job_template
- steps:
- - restore_cache: *restore_source
- - restore_cache: *restore_deps
- - run:
- name: build iot agent
- command: inv -e agent.build --flavor iot
- - run:
- name: test iot agent
- command: DD_HOSTNAME=test-circleci-hostname ./bin/agent/agent -c ./bin/agent/dist check cpu
-
documentation_generation:
<<: *job_template
steps:
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ecb11e886cf17..29be953abadba 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,47 +1,45 @@
---
include:
- - /.gitlab/.pre/cancel-prev-pipelines.yml
- - /.gitlab/benchmarks/include.yml
- - /.gitlab/binary_build/include.yml
- - /.gitlab/check_deploy/check_deploy.yml
- - /.gitlab/check_merge/do_not_merge.yml
- - /.gitlab/choco_build/choco_build.yml
- - /.gitlab/choco_deploy/choco_deploy.yml
- - /.gitlab/common/shared.yml
- - /.gitlab/common/pr_commenter.yml
- - /.gitlab/container_build/include.yml
- - /.gitlab/container_scan/container_scan.yml
- - /.gitlab/deploy_containers/deploy_containers.yml
- - /.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
- - /.gitlab/deploy_dca/deploy_dca.yml
- - /.gitlab/deploy_packages/include.yml
- - /.gitlab/deps_build/deps_build.yml
- - /.gitlab/deps_fetch/deps_fetch.yml
- - /.gitlab/dev_container_deploy/include.yml
- - /.gitlab/e2e/e2e.yml
- - /.gitlab/e2e_pre_test/e2e_pre_test.yml
- - /.gitlab/functional_test/include.yml
- - /.gitlab/functional_test_cleanup/functional_test_cleanup.yml
- - /.gitlab/install_script_testing/install_script_testing.yml
- - /.gitlab/integration_test/include.yml
- - /.gitlab/internal_image_deploy/internal_image_deploy.yml
- - /.gitlab/internal_kubernetes_deploy/include.yml
- - /.gitlab/junit_upload/junit_upload.yml
- - /.gitlab/kitchen_cleanup/include.yml
- - /.gitlab/kitchen_deploy/kitchen_deploy.yml
- - /.gitlab/kitchen_testing/include.yml
- - /.gitlab/lint/include.yml
- - /.gitlab/maintenance_jobs/include.yml
- - /.gitlab/notify/notify.yml
- - /.gitlab/package_build/include.yml
- - /.gitlab/packaging/include.yml
- - /.gitlab/package_deps_build/package_deps_build.yml
- - /.gitlab/pkg_metrics/pkg_metrics.yml
- - /.gitlab/post_rc_build/post_rc_tasks.yml
- - /.gitlab/setup/setup.yml
- - /.gitlab/software_composition_analysis/software_composition_analysis.yml
- - /.gitlab/source_test/include.yml
- - /.gitlab/trigger_release/trigger_release.yml
+ - .gitlab/.pre/cancel-prev-pipelines.yml
+ - .gitlab/.pre/test_gitlab_configuration.yml
+ - .gitlab/benchmarks/include.yml
+ - .gitlab/binary_build/include.yml
+ - .gitlab/check_deploy/check_deploy.yml
+ - .gitlab/check_merge/do_not_merge.yml
+ - .gitlab/common/shared.yml
+ - .gitlab/common/pr_commenter.yml
+ - .gitlab/container_build/include.yml
+ - .gitlab/container_scan/container_scan.yml
+ - .gitlab/deploy_containers/deploy_containers.yml
+ - .gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
+ - .gitlab/deploy_dca/deploy_dca.yml
+ - .gitlab/deploy_packages/include.yml
+ - .gitlab/deps_build/deps_build.yml
+ - .gitlab/deps_fetch/deps_fetch.yml
+ - .gitlab/dev_container_deploy/include.yml
+ - .gitlab/e2e/e2e.yml
+ - .gitlab/e2e_pre_test/e2e_pre_test.yml
+ - .gitlab/functional_test/include.yml
+ - .gitlab/functional_test_cleanup/functional_test_cleanup.yml
+ - .gitlab/install_script_testing/install_script_testing.yml
+ - .gitlab/integration_test/include.yml
+ - .gitlab/internal_image_deploy/internal_image_deploy.yml
+ - .gitlab/internal_kubernetes_deploy/include.yml
+ - .gitlab/junit_upload/junit_upload.yml
+ - .gitlab/kitchen_cleanup/include.yml
+ - .gitlab/kitchen_deploy/kitchen_deploy.yml
+ - .gitlab/kitchen_testing/include.yml
+ - .gitlab/lint/include.yml
+ - .gitlab/maintenance_jobs/include.yml
+ - .gitlab/notify/notify.yml
+ - .gitlab/package_build/include.yml
+ - .gitlab/package_deps_build/package_deps_build.yml
+ - .gitlab/pkg_metrics/pkg_metrics.yml
+ - .gitlab/post_rc_build/post_rc_tasks.yml
+ - .gitlab/setup/setup.yml
+ - .gitlab/software_composition_analysis/software_composition_analysis.yml
+ - .gitlab/source_test/include.yml
+ - .gitlab/trigger_release/trigger_release.yml
default:
retry:
@@ -136,7 +134,6 @@ variables:
DEB_TESTING_S3_BUCKET: apttesting.datad0g.com
RPM_TESTING_S3_BUCKET: yumtesting.datad0g.com
WINDOWS_TESTING_S3_BUCKET_A6: pipelines/A6/$CI_PIPELINE_ID
- WINDOWS_TESTING_S3_BUCKET_A7: pipelines/A7/$CI_PIPELINE_ID
WINDOWS_BUILDS_S3_BUCKET: $WIN_S3_BUCKET/builds
DEB_RPM_TESTING_BUCKET_BRANCH: testing # branch of the DEB_TESTING_S3_BUCKET and RPM_TESTING_S3_BUCKET repos to release to, 'testing'
S3_CP_OPTIONS: --only-show-errors --region us-east-1 --sse AES256
@@ -157,36 +154,35 @@ variables:
GENERAL_ARTIFACTS_CACHE_BUCKET_URL: https://dd-agent-omnibus.s3.amazonaws.com
S3_DSD6_URI: s3://dsd6-staging
RELEASE_VERSION_6: nightly
- RELEASE_VERSION_7: nightly-a7
# Build images versions
# To use images from datadog-agent-buildimages dev branches, set the corresponding
# SUFFIX variable to _test_only
DATADOG_AGENT_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_BUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_BUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_WINBUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_WINBUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_ARMBUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_ARMBUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_KERNEL_MATRIX_TESTING_BUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_NIKOS_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_NIKOS_BUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_NIKOS_BUILDIMAGES: v47817366-5fdf0325
DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: ""
- DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v32015963-09a32ddb
+ DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v47817366-5fdf0325
# To use images from test-infra-definitions dev branches, set the SUFFIX variable to -dev
# and check the job creating the image to make sure you have the right SHA prefix
TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: ""
# Make sure to update test-infra-definitions version in go.mod as well
- TEST_INFRA_DEFINITIONS_BUILDIMAGES: 3eac4b5bb0c4
+ TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0a48ed729822
DATADOG_AGENT_BUILDERS: v28719426-b6a4fd9
DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded
- DEB_GPG_KEY_ID: ad9589b7
- DEB_GPG_KEY_NAME: "Datadog, Inc. Master key"
- RPM_GPG_KEY_ID: fd4bf915
+ DEB_GPG_KEY_ID: c0962c7d
+ DEB_GPG_KEY_NAME: "Datadog, Inc. APT key"
+ RPM_GPG_KEY_ID: b01082d3
RPM_GPG_KEY_NAME: "Datadog, Inc. RPM key"
DOCKER_REGISTRY_URL: docker.io
KITCHEN_INFRASTRUCTURE_FLAKES_RETRY: 2
@@ -213,8 +209,8 @@ variables:
ARTIFACTORY_BYPASS_SSM_NAME: ci.datadog-agent.artifactory_bypass # agent-ci-experience
CHANGELOG_COMMIT_SHA_SSM_NAME: ci.datadog-agent.changelog_commit_sha # agent-ci-experience
CHOCOLATEY_API_KEY_SSM_NAME: ci.datadog-agent.chocolatey_api_key # windows-agent
- DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_v2_${DEB_GPG_KEY_ID} # agent-build-and-release
- DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_v2_${DEB_GPG_KEY_ID} # agent-build-and-release
+ DEB_GPG_KEY_SSM_NAME: ci.datadog-agent.deb_signing_private_key_${DEB_GPG_KEY_ID} # agent-build-and-release
+ DEB_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.deb_signing_key_passphrase_${DEB_GPG_KEY_ID} # agent-build-and-release
DOCKER_REGISTRY_LOGIN_SSM_KEY: ci.datadog-agent.docker_hub_login # agent-ci-experience
DOCKER_REGISTRY_PWD_SSM_KEY: ci.datadog-agent.docker_hub_pwd # agent-ci-experience
E2E_TESTS_API_KEY_SSM_NAME: ci.datadog-agent.e2e_tests_api_key # agent-developer-tools
@@ -232,8 +228,8 @@ variables:
MACOS_GITHUB_APP_ID_SSM_NAME: ci.datadog-agent.macos_github_app_id # agent-ci-experience
MACOS_GITHUB_INSTALLATION_ID_SSM_NAME: ci.datadog-agent.macos_github_installation_id # agent-ci-experience
MACOS_GITHUB_KEY_SSM_NAME: ci.datadog-agent.macos_github_key_b64 # agent-ci-experience
- RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_v2_${RPM_GPG_KEY_ID} # agent-build-and-release
- RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_v2_${RPM_GPG_KEY_ID} # agent-build-and-release
+ RPM_GPG_KEY_SSM_NAME: ci.datadog-agent.rpm_signing_private_key_${RPM_GPG_KEY_ID} # agent-build-and-release
+ RPM_SIGNING_PASSPHRASE_SSM_NAME: ci.datadog-agent.rpm_signing_key_passphrase_${RPM_GPG_KEY_ID} # agent-build-and-release
SMP_ACCOUNT_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-account-id # single-machine-performance
SMP_AGENT_TEAM_ID_SSM_NAME: ci.datadog-agent.single-machine-performance-agent-team-id # single-machine-performance
SMP_API_SSM_NAME: ci.datadog-agent.single-machine-performance-api # single-machine-performance
@@ -256,10 +252,10 @@ variables:
# Condition mixins for simplification of rules
#
.if_main_branch: &if_main_branch
- if: $CI_COMMIT_BRANCH == "main"
+ if: $CI_COMMIT_BRANCH == "6.53.x"
.if_not_main_branch: &if_not_main_branch
- if: $CI_COMMIT_BRANCH != "main"
+ if: $CI_COMMIT_BRANCH != "6.53.x"
.if_release_branch: &if_release_branch
if: $CI_COMMIT_BRANCH =~ /^[0-9]+\.[0-9]+\.x$/
@@ -270,9 +266,6 @@ variables:
.if_not_version_6: &if_not_version_6
if: $RELEASE_VERSION_6 == ""
-.if_version_7: &if_version_7
- if: $RELEASE_VERSION_7 != ""
-
.if_not_version_7: &if_not_version_7
if: $RELEASE_VERSION_7 == ""
@@ -310,10 +303,10 @@ variables:
# RUN_ALL_BUILDS has no effect on main/deploy pipelines: they always run all builds (as some jobs
# on main and deploy pipelines depend on jobs that are only run if we run all builds).
.if_run_all_builds: &if_run_all_builds
- if: $CI_COMMIT_BRANCH == "main" || $DEPLOY_AGENT == "true" || $RUN_ALL_BUILDS == "true"
+ if: $CI_COMMIT_BRANCH == "6.53.x" || $DEPLOY_AGENT == "true" || $RUN_ALL_BUILDS == "true"
.if_not_run_all_builds: &if_not_run_all_builds
- if: $CI_COMMIT_BRANCH != "main" && $DEPLOY_AGENT != "true" && $RUN_ALL_BUILDS != "true"
+ if: $CI_COMMIT_BRANCH != "6.53.x" && $DEPLOY_AGENT != "true" && $RUN_ALL_BUILDS != "true"
# Rule to trigger test setup, run, and cleanup.
# By default:
@@ -322,7 +315,7 @@ variables:
# RUN_E2E_TESTS can be set to on to force all the installer tests to be run on a branch pipeline.
# RUN_E2E_TESTS can be set to false to force installer tests to not run on main/deploy pipelines.
.if_installer_tests: &if_installer_tests
- if: ($CI_COMMIT_BRANCH == "main" || $DEPLOY_AGENT == "true" || $RUN_E2E_TESTS == "on") && $RUN_E2E_TESTS != "off"
+ if: ($CI_COMMIT_BRANCH == "6.53.x" || $DEPLOY_AGENT == "true" || $RUN_E2E_TESTS == "on") && $RUN_E2E_TESTS != "off"
.if_testing_cleanup: &if_testing_cleanup
if: $TESTING_CLEANUP == "true"
@@ -368,7 +361,7 @@ variables:
if: $DEPLOY_AGENT == "true" && $BUCKET_BRANCH == "beta" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/
.if_scheduled_main: &if_scheduled_main
- if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_BRANCH == "main"
+ if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_BRANCH == "6.53.x"
# Rule to trigger jobs only when a branch matches the mergequeue pattern.
.if_mergequeue: &if_mergequeue
@@ -416,38 +409,6 @@ workflow:
- when: manual
allow_failure: true
-.on_a6:
- - <<: *if_mergequeue
- when: never
- - <<: *if_version_6
-
-.on_a6_manual:
- - <<: *if_mergequeue
- when: never
- - <<: *if_version_6
- when: manual
- allow_failure: true
-
-.on_a7:
- - <<: *if_mergequeue
- when: never
- - <<: *if_version_7
-
-.on_a7_manual:
- - <<: *if_mergequeue
- when: never
- - <<: *if_version_7
- when: manual
- allow_failure: true
-
-.except_no_a6_or_no_a7:
- - <<: *if_mergequeue
- when: never
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_version_7
- when: never
-
.on_dev_branch_manual:
- <<: *if_mergequeue
when: never
@@ -466,28 +427,6 @@ workflow:
when: manual
allow_failure: true
-.on_main_a6:
- - <<: *if_not_version_6
- when: never
- - <<: *if_main_branch
-
-.on_main_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_main_branch
-
-.on_tag_or_a7:
- - <<: *if_mergequeue
- when: never
- - <<: *if_tagged_commit
- - <<: *if_version_7
-
-.on_tag_or_a7_all_builds:
- - <<: *if_not_run_all_builds
- when: never
- - <<: *if_tagged_commit
- - <<: *if_version_7
-
.on_deploy:
- <<: *if_deploy
@@ -495,109 +434,7 @@ workflow:
- <<: *if_deploy
when: on_failure
-.on_deploy_a6:
- - <<: *if_not_version_6
- when: never
- - <<: *if_deploy
-
-.on_deploy_a6_failure:
- - <<: *if_not_version_6
- when: never
- - <<: *if_deploy
- when: on_failure
-
-.on_deploy_a6_rc:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_deploy
- when: never
- - <<: *if_rc_tag_on_beta_repo_branch
- when: on_success
- variables:
- AGENT_REPOSITORY: agent
- DSD_REPOSITORY: dogstatsd
- IMG_REGISTRIES: public
-
-.on_deploy_a6_manual:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_deploy
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent-dev
- IMG_REGISTRIES: dev
- - when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent
- IMG_REGISTRIES: public
-
-# Same as on_deploy_a6_manual, except the job would not run on pipelines
-# using beta branch, it would only run for the final release.
-.on_deploy_a6_manual_final:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_deploy
- when: never
- - <<: *if_deploy_on_beta_repo_branch
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent-dev
- IMG_REGISTRIES: dev
- - when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent
- IMG_REGISTRIES: public
-
-# This rule is a variation of on_deploy_a6_manual where
-# the job is usually run manually, except when the pipeline
-# builds an RC: in this case, the job is run automatically.
-# This is done to reduce the number of manual steps that have
-# to be done when creating RCs.
-.on_deploy_a6_manual_auto_on_rc:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_deploy
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent-dev
- IMG_REGISTRIES: dev
- - <<: *if_rc_tag_on_beta_repo_branch
- when: on_success
- variables:
- AGENT_REPOSITORY: agent
- DSD_REPOSITORY: dogstatsd
- IMG_REGISTRIES: public
- - when: manual
- allow_failure: true
- variables:
- AGENT_REPOSITORY: agent
- IMG_REGISTRIES: public
-
-.on_deploy_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_deploy
-
-.on_deploy_a7_failure:
- - <<: *if_not_version_7
- when: never
- - <<: *if_deploy
- when: on_failure
-
-.on_deploy_a7_rc:
- - <<: *if_not_version_7
- when: never
+.on_deploy_rc:
- <<: *if_not_deploy
when: never
- <<: *if_rc_tag_on_beta_repo_branch
@@ -607,9 +444,7 @@ workflow:
DSD_REPOSITORY: dogstatsd
IMG_REGISTRIES: public
-.on_deploy_a7_manual:
- - <<: *if_not_version_7
- when: never
+.on_deploy_manual:
- <<: *if_not_deploy
when: never
- <<: *if_not_stable_or_beta_repo_branch
@@ -617,22 +452,17 @@ workflow:
allow_failure: true
variables:
AGENT_REPOSITORY: agent-dev
- DSD_REPOSITORY: dogstatsd-dev
IMG_REGISTRIES: dev
- when: manual
allow_failure: true
variables:
AGENT_REPOSITORY: agent
- DSD_REPOSITORY: dogstatsd
IMG_REGISTRIES: public
# rule to trigger job for internal image deployment if deploy is set or
# manually if not
-.on_deploy_a7_internal_or_manual:
- - <<: *if_mergequeue
- when: never
- - <<: *if_not_version_7
- when: never
+.on_deploy_internal_or_manual:
+ - !reference [.except_mergequeue]
- <<: *if_deploy
variables:
RELEASE_PROD: "true"
@@ -641,11 +471,9 @@ workflow:
variables:
RELEASE_PROD: "false"
-# Same as on_deploy_a7_manual, except the job would not run on pipelines
+# Same as on_deploy_manual, except the job would not run on pipelines
# using beta branch, it would only run for the final release.
-.on_deploy_a7_manual_final:
- - <<: *if_not_version_7
- when: never
+.on_deploy_manual_final:
- <<: *if_not_deploy
when: never
- <<: *if_deploy_on_beta_repo_branch
@@ -655,23 +483,19 @@ workflow:
allow_failure: true
variables:
AGENT_REPOSITORY: agent-dev
- DSD_REPOSITORY: dogstatsd-dev
IMG_REGISTRIES: dev
- when: manual
allow_failure: true
variables:
AGENT_REPOSITORY: agent
- DSD_REPOSITORY: dogstatsd
IMG_REGISTRIES: public
-# This rule is a variation of on_deploy_a7_manual where
+# This rule is a variation of on_deploy_manual where
# the job is usually run manually, except when the pipeline
# builds an RC: in this case, the job is run automatically.
# This is done to reduce the number of manual steps that have
# to be done when creating RCs.
-.on_deploy_a7_manual_auto_on_rc:
- - <<: *if_not_version_7
- when: never
+.on_deploy_manual_auto_on_rc:
- <<: *if_not_deploy
when: never
- <<: *if_not_stable_or_beta_repo_branch
@@ -679,7 +503,6 @@ workflow:
allow_failure: true
variables:
AGENT_REPOSITORY: agent-dev
- DSD_REPOSITORY: dogstatsd-dev
IMG_REGISTRIES: dev
- <<: *if_rc_tag_on_beta_repo_branch
when: on_success
@@ -691,17 +514,14 @@ workflow:
allow_failure: true
variables:
AGENT_REPOSITORY: agent
- DSD_REPOSITORY: dogstatsd
IMG_REGISTRIES: public
# This is used for image vulnerability scanning. Because agent 6
# uses python 2, which has many vulnerabilities that will not get
# patched, we do not wish to scan this image. For this reason, only
# agent 7 versions should be published internally using these
-# configurations.
-.on_deploy_a7_internal_rc:
- - <<: *if_not_version_7
- when: never
+# configurations. -> then we should remove this
+.on_deploy_internal_rc:
- <<: *if_not_deploy
when: never
- <<: *if_rc_tag_on_beta_repo_branch
@@ -712,11 +532,9 @@ workflow:
DSD_REPOSITORY: ci/datadog-agent/dogstatsd-release
IMG_REGISTRIES: internal-aws-ddbuild
-# Same as on_deploy_a7_manual_final, except the job is used to publish images
+# Same as on_deploy_manual_final, except the job is used to publish images
# to our internal registries.
-.on_deploy_a7_internal_manual_final:
- - <<: *if_not_version_7
- when: never
+.on_deploy_internal_manual_final:
- <<: *if_not_deploy
when: never
- <<: *if_deploy_on_beta_repo_branch
@@ -731,16 +549,7 @@ workflow:
DSD_REPOSITORY: ci/datadog-agent/dogstatsd-release
IMG_REGISTRIES: internal-aws-ddbuild
-.on_deploy_nightly_repo_branch_a6:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_nightly_or_dev_repo_branch
- when: never
- - <<: *if_deploy
-
-.on_deploy_nightly_repo_branch_a7:
- - <<: *if_not_version_7
- when: never
+.on_deploy_nightly_repo_branch:
- <<: *if_not_nightly_or_dev_repo_branch
when: never
- <<: *if_deploy
@@ -750,77 +559,13 @@ workflow:
when: never
- <<: *if_deploy
-.on_deploy_stable_or_beta_repo_branch_a6:
- - <<: *if_not_version_6
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_deploy
-
-.on_deploy_stable_or_beta_repo_branch_a6_manual:
- - <<: *if_not_version_6
- when: never
+.on_deploy_stable_or_beta_repo_branch_manual:
- <<: *if_not_stable_or_beta_repo_branch
when: never
- <<: *if_deploy
when: manual
allow_failure: true
-.on_deploy_stable_or_beta_repo_branch_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_deploy
-
-.on_deploy_stable_or_beta_repo_branch_a7_manual:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_deploy
- when: manual
- allow_failure: true
-
-.on_deploy_stable_or_rc_tag_on_beta_repo_branch_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_rc_tag_on_beta_repo_branch
- when: on_success
- - <<: *if_deploy_on_stable_repo_branch
- when: on_success
- - when: never
-
-.on_deploy_stable_or_rc_tag_on_beta_repo_branch_a7_manual_on_stable:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_rc_tag_on_beta_repo_branch
- when: on_success
- - <<: *if_deploy_on_stable_repo_branch
- when: manual
- allow_failure: true
- - when: never
-
-# This rule is a variation of on_deploy_stable_or_beta_repo_branch_a7_manual where
-# the job is usually run manually, except when the pipeline
-# builds an RC: in this case, the job is run automatically.
-# This is done to reduce the number of manual steps that have
-# to be done when creating RCs.
-.on_deploy_stable_or_beta_repo_branch_a7_manual_auto_on_rc:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_or_beta_repo_branch
- when: never
- - <<: *if_rc_tag_on_beta_repo_branch
- when: on_success
- - <<: *if_deploy
- when: manual
- allow_failure: true
-
# This rule will add the job as manual when running on beta deploy branch
# and will add it as a regular automatically running job when running
# on stable deploy branch.
@@ -834,15 +579,6 @@ workflow:
allow_failure: true
- when: on_success
-.on_deploy_stable_repo_branch_a7_manual:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_stable_repo_branch
- when: never
- - <<: *if_deploy
- when: manual
- allow_failure: true
-
.except_deploy:
- <<: *if_deploy
when: never
@@ -852,20 +588,6 @@ workflow:
- if: $DEPLOY_AGENT == "false" && $RUN_E2E_TESTS == "off"
when: never
-.on_a6_except_deploy:
- - <<: *if_not_version_6
- when: never
- - <<: *if_deploy
- when: never
- - when: on_success
-
-.on_a7_except_deploy:
- - <<: *if_not_version_7
- when: never
- - <<: *if_deploy
- when: never
- - when: on_success
-
.on_main_or_release_branch:
- <<: *if_main_branch
- <<: *if_release_branch
@@ -895,85 +617,40 @@ workflow:
.on_all_builds:
- <<: *if_run_all_builds
-.on_all_builds_a6:
- - <<: *if_not_version_6
- when: never
- - <<: *if_run_all_builds
-
-.on_all_builds_a6_manual:
- - <<: *if_not_version_6
- when: never
+.on_all_builds_manual:
- <<: *if_run_all_builds
when: manual
allow_failure: true
-.on_all_builds_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_run_all_builds
-
-.on_all_builds_a7_manual:
- - <<: *if_not_version_7
- when: never
- - <<: *if_run_all_builds
- when: manual
- allow_failure: true
-
-.on_kitchen_tests_a6:
- - <<: *if_not_version_6
- when: never
+.on_kitchen_tests:
- <<: *if_installer_tests
-.on_kitchen_tests_a6_always:
- - <<: *if_not_version_6
- when: never
+.on_kitchen_tests_always:
- <<: *if_installer_tests
when: always
-.on_all_kitchen_builds_a6:
- - <<: *if_not_version_6
- when: never
+.on_all_kitchen_builds:
- <<: *if_not_run_all_builds
when: never
- <<: *if_installer_tests
-.on_kitchen_tests_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_installer_tests
-
-.on_all_kitchen_builds_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_run_all_builds
- when: never
- - <<: *if_installer_tests
-
-.on_all_new-e2e_tests_a7:
- - <<: *if_not_version_7
- when: never
- - <<: *if_not_run_all_builds
- when: never
+.on_all_install_script_tests:
- <<: *if_installer_tests
# Default kitchen tests are also run on dev branches
# In that case, the target OS versions is a subset of the
# available versions, stored in DEFAULT_KITCHEN_OSVERS
-.on_default_kitchen_tests_a7:
+.on_default_kitchen_tests:
- <<: *if_mergequeue
when: never
- - <<: *if_not_version_7
- when: never
- <<: *if_installer_tests
- <<: *if_auto_e2e_tests
variables:
KITCHEN_OSVERS: $DEFAULT_KITCHEN_OSVERS
-.on_default_new-e2e_tests_a7:
+.on_default_new-e2e_tests:
- <<: *if_mergequeue
when: never
- - <<: *if_not_version_7
- when: never
- <<: *if_disable_e2e_tests
when: never
- <<: *if_installer_tests
@@ -981,18 +658,6 @@ workflow:
variables:
E2E_OSVERS: $E2E_BRANCH_OSVERS
-.on_default_kitchen_tests_a7_always:
- - <<: *if_mergequeue
- when: never
- - <<: *if_not_version_7
- when: never
- - <<: *if_installer_tests
- when: always
- - <<: *if_auto_e2e_tests
- when: always
- variables:
- KITCHEN_OSVERS: $DEFAULT_KITCHEN_OSVERS
-
.on_main_or_testing_cleanup:
- <<: *if_main_branch
- <<: *if_testing_cleanup
@@ -1027,7 +692,7 @@ workflow:
- <<: *if_run_all_kmt_tests
- changes:
paths: *security_agent_change_paths
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1040,7 +705,7 @@ workflow:
- test/new-e2e/tests/windows/install-test/**/*
- test/new-e2e/tests/windows/domain-test/**/*
- tasks/msi.py
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
.on_windows_installer_changes_or_manual:
- <<: *if_disable_e2e_tests
@@ -1086,7 +751,7 @@ workflow:
- <<: *if_run_all_kmt_tests
- changes:
paths: *system_probe_change_paths
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1100,7 +765,7 @@ workflow:
- test/new-e2e/pkg/**/*
- test/new-e2e/test-infra-definition/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1125,7 +790,7 @@ workflow:
paths:
- test/new-e2e/pkg/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
.always_on_container_or_e2e_changes_or_manual:
- <<: *if_disable_e2e_tests
@@ -1144,7 +809,7 @@ workflow:
paths:
- test/new-e2e/pkg/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- changes:
paths:
- comp/core/tagger/**/*
@@ -1166,7 +831,7 @@ workflow:
- pkg/util/cgroups/**/*
- test/new-e2e/tests/containers/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: always
- when: manual
allow_failure: true
@@ -1194,7 +859,7 @@ workflow:
- pkg/util/cgroups/**/*
- test/new-e2e/tests/containers/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1206,7 +871,7 @@ workflow:
- pkg/config/remote/**/*
- comp/remote-config/**/*
- test/new-e2e/tests/remote-config/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1217,11 +882,11 @@ workflow:
when: never
- changes:
paths: *system_probe_change_paths
- compare_to: 7.53.x
+ compare_to: 6.53.x
when: on_success
- changes:
paths: *security_agent_change_paths
- compare_to: 7.53.x
+ compare_to: 6.53.x
when: on_success
- when: manual
allow_failure: true
@@ -1232,8 +897,9 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for ASC
- test/new-e2e/tests/agent-shared-components/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
+ allow_failure: true
.on_subcommands_or_e2e_changes_or_manual:
- !reference [.on_e2e_main_release_or_rc]
@@ -1243,8 +909,9 @@ workflow:
- pkg/**/*
- comp/**/*
- test/new-e2e/tests/agent-subcommands/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
+ allow_failure: true
.on_language-detection_or_e2e_changes_or_manual:
- !reference [.on_e2e_main_release_or_rc]
@@ -1252,7 +919,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for language detection
- test/new-e2e/tests/language-detection/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1262,7 +929,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for npm
- test/new-e2e/tests/npm/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1272,7 +939,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for AML
- test/new-e2e/tests/agent-metrics-logs/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1282,7 +949,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for CWS
- test/new-e2e/tests/cws/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1292,7 +959,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for process
- test/new-e2e/tests/process/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1302,7 +969,7 @@ workflow:
paths:
# TODO: Add paths that should trigger tests for orchestrator
- test/new-e2e/tests/orchestrator/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
- when: manual
allow_failure: true
@@ -1317,6 +984,19 @@ workflow:
when: manual
allow_failure: true
+.on_installer_or_e2e_changes:
+ - !reference [.on_e2e_main_release_or_rc]
+ - changes:
+ paths:
+ - .gitlab/**/*
+ - omnibus/config/**/*
+ - pkg/fleet/**/*
+ - cmd/installer/**/*
+ - test/new-e2e/tests/installer/**/*
+ - tasks/installer.py
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ when: on_success
+
.on_apm_or_e2e_changes_or_manual:
- !reference [.on_e2e_main_release_or_rc]
- changes:
@@ -1326,7 +1006,7 @@ workflow:
- comp/trace/**/*
- test/new-e2e/tests/apm/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1348,7 +1028,7 @@ workflow:
- cmd/updater/**/*
- test/new-e2e/tests/updater/**/*
- test/new-e2e/go.mod
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
when: on_success
- when: manual
allow_failure: true
@@ -1397,14 +1077,14 @@ workflow:
- .gitlab/package_build.yml
- release.json
- .gitlab/package_build/**/*
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
.on_go-version_change:
- !reference [.except_mergequeue] # The prerequisites are not run in the mergequeue pipeline so we need to skip this rule
- changes:
paths:
- .go-version
- compare_to: 7.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
+ compare_to: 6.53.x # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
.on_fakeintake_changes: &on_fakeintake_changes
changes:
@@ -1415,14 +1095,14 @@ workflow:
.on_fakeintake_changes_on_main_or_manual:
- <<: *on_fakeintake_changes
- if: $CI_COMMIT_BRANCH == "main"
+ if: $CI_COMMIT_BRANCH == "6.53.x"
- <<: *on_fakeintake_changes
when: manual
allow_failure: true
.on_fakeintake_changes_on_main:
- <<: *on_fakeintake_changes
- if: $CI_COMMIT_BRANCH == "main"
+ if: $CI_COMMIT_BRANCH == "6.53.x"
.fast_on_dev_branch_only:
- <<: *if_main_branch
@@ -1442,3 +1122,11 @@ workflow:
FAST_TESTS: "false"
- variables:
FAST_TESTS: "true"
+
+.on_gitlab_changes:
+ - !reference [.except_mergequeue]
+ - changes:
+ paths:
+ - .gitlab-ci.yml
+ - .gitlab/**/*
+ compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916
diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml
new file mode 100644
index 0000000000000..fd740ffc712d9
--- /dev/null
+++ b/.gitlab/.pre/test_gitlab_configuration.yml
@@ -0,0 +1,10 @@
+test_gitlab_configuration:
+ stage: .pre
+ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
+ tags: ["arch:amd64"]
+ rules:
+ - !reference [.on_gitlab_changes]
+ script:
+ - source /root/.bashrc
+ - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
+ - inv lint-gitlab
diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS
index f1b85f7eb7766..648a6d4a6146c 100644
--- a/.gitlab/JOBOWNERS
+++ b/.gitlab/JOBOWNERS
@@ -116,21 +116,8 @@ k8s-e2e-cws-* @DataDog/agent-security
k8s-e2e-cspm-* @DataDog/agent-security
# New E2E
-new-e2e-containers* @DataDog/container-integrations
-new-e2e-agent-subcommands* @DataDog/agent-shared-components
-new-e2e-agent-shared-components* @DataDog/agent-shared-components
-new-e2e-language-detection* @DataDog/processes
-new-e2e-process* @DataDog/processes
-new-e2e-agent-platform* @DataDog/agent-build-and-releases
-new-e2e-aml* @DataDog/agent-metrics-logs
-new-e2e-apm* @DataDog/agent-apm
-new-e2e-npm* @DataDog/Networks
-new-e2e-cws* @DataDog/agent-security
-new-e2e-windows-agent* @DataDog/windows-agent
-new-e2e-orchestrator* @DataDog/container-app
-e2e_pre_test* @DataDog/agent-developer-tools
-new-e2e-remote-config* @DataDog/remote-config
-new-e2e-updater* @DataDog/fleet
+e2e_pre_test* @DataDog/agent-devx-loops
+new-e2e* @DataDog/multiple
# Kernel matrix testing
upload_dependencies* @DataDog/ebpf-platform
diff --git a/.gitlab/benchmarks/include.yml b/.gitlab/benchmarks/include.yml
index 1b1308ecbe90f..03e75501e413f 100644
--- a/.gitlab/benchmarks/include.yml
+++ b/.gitlab/benchmarks/include.yml
@@ -3,5 +3,5 @@
# Contains jobs to benchmark the Agent.
include:
- - /.gitlab/benchmarks/benchmarks.yml
- - /.gitlab/benchmarks/macrobenchmarks.yml
\ No newline at end of file
+ - .gitlab/benchmarks/benchmarks.yml
+ - .gitlab/benchmarks/macrobenchmarks.yml
\ No newline at end of file
diff --git a/.gitlab/binary_build/cluster_agent.yml b/.gitlab/binary_build/cluster_agent.yml
index 7cc098f2da64a..2086e70abb976 100644
--- a/.gitlab/binary_build/cluster_agent.yml
+++ b/.gitlab/binary_build/cluster_agent.yml
@@ -4,7 +4,7 @@
needs: ["go_mod_tidy_check"]
script:
- inv check-go-version
- - inv -e cluster-agent.build --release-version "$RELEASE_VERSION_7"
+ - inv -e cluster-agent.build --release-version "$RELEASE_VERSION_6"
- $S3_CP_CMD $CI_PROJECT_DIR/$CLUSTER_AGENT_BINARIES_DIR/datadog-cluster-agent $S3_ARTIFACTS_URI/datadog-cluster-agent.$ARCH
- $S3_CP_CMD $CI_PROJECT_DIR/Dockerfiles/cluster-agent/datadog-cluster.yaml $S3_ARTIFACTS_URI/datadog-cluster.yaml
- $S3_CP_CMD $CI_PROJECT_DIR/Dockerfiles/cluster-agent/security-agent-policies $S3_ARTIFACTS_URI/security-agent-policies --recursive
@@ -15,7 +15,8 @@
cluster_agent-build_amd64:
extends: .cluster_agent-build_common
rules:
- !reference [.on_tag_or_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
needs: ["go_mod_tidy_check", "go_deps"]
@@ -28,7 +29,8 @@ cluster_agent-build_amd64:
cluster_agent-build_arm64:
extends: .cluster_agent-build_common
rules:
- !reference [.on_tag_or_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:arm64"]
needs: ["go_mod_tidy_check", "go_deps"]
diff --git a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
index 34892c1b8d2e7..07e5b31f5dc6d 100644
--- a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
+++ b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml
@@ -1,7 +1,8 @@
---
cluster_agent_cloudfoundry-build_amd64:
rules:
- !reference [.on_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: binary_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -20,5 +21,5 @@ cluster_agent_cloudfoundry-build_amd64:
- inv -e cluster-agent-cloudfoundry.build
- cd $CI_PROJECT_DIR/$CLUSTER_AGENT_CLOUDFOUNDRY_BINARIES_DIR
- mkdir -p $OMNIBUS_PACKAGE_DIR
- - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)
+ - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 6)
- tar cf $OMNIBUS_PACKAGE_DIR/datadog-cluster-agent-cloudfoundry-$PACKAGE_VERSION-$ARCH.tar.xz datadog-cluster-agent-cloudfoundry
diff --git a/.gitlab/binary_build/cws_instrumentation.yml b/.gitlab/binary_build/cws_instrumentation.yml
index fb5d9993f54cc..55f66c879890c 100644
--- a/.gitlab/binary_build/cws_instrumentation.yml
+++ b/.gitlab/binary_build/cws_instrumentation.yml
@@ -10,7 +10,8 @@
cws_instrumentation-build_amd64:
extends: .cws_instrumentation-build_common
rules:
- !reference [.on_tag_or_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
needs: ["go_mod_tidy_check", "go_deps"]
@@ -23,7 +24,8 @@ cws_instrumentation-build_amd64:
cws_instrumentation-build_arm64:
extends: .cws_instrumentation-build_common
rules:
- !reference [.on_tag_or_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:arm64"]
needs: ["go_mod_tidy_check", "go_deps"]
diff --git a/.gitlab/binary_build/include.yml b/.gitlab/binary_build/include.yml
index b2aeddeef6286..f9fb49704b391 100644
--- a/.gitlab/binary_build/include.yml
+++ b/.gitlab/binary_build/include.yml
@@ -3,11 +3,10 @@
# Contains jobs which build various go binaries (dogstatsd, IoT agent, cluster-agent, cluster-agent-cloudfoundry).
include:
- - /.gitlab/binary_build/cluster_agent_cloudfoundry.yml
- - /.gitlab/binary_build/cluster_agent.yml
- - /.gitlab/binary_build/cws_instrumentation.yml
- - /.gitlab/binary_build/fakeintake.yml
- - /.gitlab/binary_build/linux.yml
- - /.gitlab/binary_build/system_probe.yml
- - /.gitlab/binary_build/windows.yml
- - /.gitlab/binary_build/serverless.yml
+ - .gitlab/binary_build/cluster_agent_cloudfoundry.yml
+ - .gitlab/binary_build/cluster_agent.yml
+ - .gitlab/binary_build/cws_instrumentation.yml
+ - .gitlab/binary_build/fakeintake.yml
+ - .gitlab/binary_build/system_probe.yml
+ - .gitlab/binary_build/windows.yml
+ - .gitlab/binary_build/serverless.yml
diff --git a/.gitlab/binary_build/linux.yml b/.gitlab/binary_build/linux.yml
deleted file mode 100644
index 0ffea6d21db08..0000000000000
--- a/.gitlab/binary_build/linux.yml
+++ /dev/null
@@ -1,127 +0,0 @@
----
-build_dogstatsd_static-binary_x64:
- stage: binary_build
- rules: !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["lint_linux-x64", "go_deps"]
- variables:
- ARCH: amd64
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e dogstatsd.build --static --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$STATIC_BINARIES_DIR/dogstatsd $S3_ARTIFACTS_URI/static/dogstatsd.$ARCH
-
-build_dogstatsd_static-binary_arm64:
- stage: binary_build
- rules: !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["lint_linux-arm64", "go_deps"]
- variables:
- ARCH: arm64
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e dogstatsd.build --static --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$STATIC_BINARIES_DIR/dogstatsd $S3_ARTIFACTS_URI/static/dogstatsd.$ARCH
-
-build_dogstatsd-binary_x64:
- stage: binary_build
- rules: !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["lint_linux-x64", "go_deps"]
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e dogstatsd.build --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$DOGSTATSD_BINARIES_DIR/dogstatsd $S3_ARTIFACTS_URI/dogstatsd/dogstatsd
-
-build_dogstatsd-binary_arm64:
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: binary_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["lint_linux-arm64", "go_deps"]
- variables:
- ARCH: arm64
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e dogstatsd.build --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$DOGSTATSD_BINARIES_DIR/dogstatsd $S3_ARTIFACTS_URI/dogstatsd/dogstatsd.$ARCH
-
-# IoT Agent builds to make sure the build is not broken because of build flags
-build_iot_agent-binary_x64:
- stage: binary_build
- rules: !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["lint_linux-x64", "go_deps"]
- before_script:
- - !reference [.retrieve_linux_go_deps]
- script:
- - source /root/.bashrc
- - inv check-go-version
- - inv -e agent.build --flavor iot --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$AGENT_BINARIES_DIR/agent $S3_ARTIFACTS_URI/iot/agent
-
-build_iot_agent-binary_arm64:
- rules: !reference [.on_all_builds_a7]
- stage: binary_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["lint_linux-arm64", "go_deps"]
- variables:
- ARCH: arm64
- before_script:
- - !reference [.retrieve_linux_go_deps]
- script:
- - source /root/.bashrc
- - inv check-go-version
- - inv -e agent.build --flavor iot --major-version 7
-
-build_agentless_scanner-binary_x64:
- stage: binary_build
- rules:
- !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["lint_linux-x64", "go_deps"]
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e agentless-scanner.build --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$AGENTLESS_SCANNER_BINARIES_DIR/agentless-scanner $S3_ARTIFACTS_URI/agentless-scanner/agentless-scanner
-
-build_agentless_scanner-binary_arm64:
- stage: binary_build
- rules:
- !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["lint_linux-arm64", "go_deps"]
- variables:
- ARCH: arm64
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- - inv check-go-version
- - inv -e agentless-scanner.build --major-version 7
- - $S3_CP_CMD $CI_PROJECT_DIR/$AGENTLESS_SCANNER_BINARIES_DIR/agentless-scanner $S3_ARTIFACTS_URI/agentless-scanner/agentless-scanner.$ARCH
diff --git a/.gitlab/check_deploy/check_deploy.yml b/.gitlab/check_deploy/check_deploy.yml
index d014ec0f214f1..49df76125370f 100644
--- a/.gitlab/check_deploy/check_deploy.yml
+++ b/.gitlab/check_deploy/check_deploy.yml
@@ -8,7 +8,7 @@
# overwrite a public package). To update an erroneous package, first remove it
# from our S3 bucket.
check_already_deployed_version_6:
- rules: !reference [.on_deploy_a6]
+ rules: !reference [.on_deploy]
stage: check_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
@@ -18,15 +18,3 @@ check_already_deployed_version_6:
script:
- cd $OMNIBUS_PACKAGE_DIR && /deploy_scripts/fail_deb_is_pkg_already_exists.sh datadog-agent_6*_amd64.deb
- cd $OMNIBUS_PACKAGE_DIR && /deploy_scripts/fail_deb_is_pkg_already_exists.sh datadog-agent_6*_arm64.deb
-
-check_already_deployed_version_7:
- rules: !reference [.on_deploy_a7]
- stage: check_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- dependencies: ["agent_deb-x64-a7", "agent_deb-arm64-a7"]
- before_script:
- - ls $OMNIBUS_PACKAGE_DIR
- script:
- - cd $OMNIBUS_PACKAGE_DIR && /deploy_scripts/fail_deb_is_pkg_already_exists.sh datadog-agent_7*_amd64.deb
- - cd $OMNIBUS_PACKAGE_DIR && /deploy_scripts/fail_deb_is_pkg_already_exists.sh datadog-agent_7*_arm64.deb
diff --git a/.gitlab/choco_build/choco_build.yml b/.gitlab/choco_build/choco_build.yml
deleted file mode 100644
index 9b89e5f10ebe1..0000000000000
--- a/.gitlab/choco_build/choco_build.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-# choco_build stage
-# Contains jobs which build the chocolatey Agent package.
-
-# Not currently used in the pipeline.
-windows_choco_offline_7_x64:
- rules:
- !reference [.on_a7_manual]
- stage: choco_build
- tags: ["runner:windows-docker", "windowsversion:1809"]
- needs: ["windows_msi_and_bosh_zip_x64-a7"]
- variables:
- ARCH: "x64"
- script:
- - $ErrorActionPreference = "Stop"
- - Get-ChildItem omnibus\pkg
- - copy omnibus\pkg\*.msi .\chocolatey\tools-offline\
- - docker run --rm -v "$(Get-Location):c:\mnt" 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\chocopack.bat offline
- - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
- - copy build-out\*.nupkg omnibus\pkg
- artifacts:
- expire_in: 2 weeks
- paths:
- - omnibus/pkg
-
-# The online version of the choco job fetches the msi package from S3 so
-# it is run only once the msi package is pushed
-windows_choco_online_7_x64:
- rules:
- !reference [.on_deploy_stable_or_beta_repo_branch_a7_manual]
- stage: choco_build
- tags: ["runner:windows-docker", "windowsversion:1809"]
- needs: ["deploy_staging_windows_tags-7"]
- variables:
- ARCH: "x64"
- script:
- - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
- - $ErrorActionPreference = "Stop"
- - if (Test-Path .omnibus) { remove-item -recurse -force .omnibus }
- - if (Test-Path build-out) { remove-item -recurse -force build-out }
- - mkdir omnibus\pkg
- - docker run --rm -v "$(Get-Location):c:\mnt" 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} c:\mnt\tasks\winbuildscripts\chocopack.bat online
- - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
- - copy build-out\*.nupkg omnibus\pkg
- artifacts:
- expire_in: 2 weeks
- paths:
- - omnibus/pkg
- # Sometimes Chocolatey is flakey
- retry: 2
diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml
deleted file mode 100644
index 5348a2096ba50..0000000000000
--- a/.gitlab/choco_deploy/choco_deploy.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# choco_build stage
-# Contains a job which deploys the chocolatey Agent package.
-
-publish_choco_7_x64:
- rules: !reference [.on_deploy_stable_or_beta_repo_branch_a7_manual]
- stage: choco_deploy
- tags: ["runner:windows-docker", "windowsversion:1809"]
- needs: ["windows_choco_online_7_x64"]
- variables:
- ARCH: "x64"
- before_script:
- - $chocolateyApiKey = (aws ssm get-parameter --region us-east-1 --name $CHOCOLATEY_API_KEY_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- script:
- - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
- - $ErrorActionPreference = "Stop"
- - Get-ChildItem omnibus\pkg
- - if (Test-Path nupkg) { remove-item -recurse -force nupkg }
- - mkdir nupkg
- - copy omnibus\pkg\*.nupkg nupkg\
- - Get-ChildItem nupkg
- - >
- docker run --rm
- -v "$(Get-Location):c:\mnt"
- -e CHOCOLATEY_API_KEY=${chocolateyApiKey}
- 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
- c:\mnt\tasks\winbuildscripts\chocopush.bat
- - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
diff --git a/.gitlab/container_build/docker_linux.yml b/.gitlab/container_build/docker_linux.yml
index a764ef8790544..d869dde91f83c 100644
--- a/.gitlab/container_build/docker_linux.yml
+++ b/.gitlab/container_build/docker_linux.yml
@@ -41,7 +41,9 @@
# build agent6 py2 image
docker_build_agent6:
extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: agent_deb-x64-a6
artifacts: false
@@ -53,7 +55,9 @@ docker_build_agent6:
docker_build_agent6_arm64:
extends: .docker_build_job_definition_arm64
- rules: !reference [.on_all_builds_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: agent_deb-arm64-a6
artifacts: false
@@ -66,7 +70,9 @@ docker_build_agent6_arm64:
# build agent6 py2 jmx image
docker_build_agent6_jmx:
extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: agent_deb-x64-a6
artifacts: false
@@ -80,7 +86,9 @@ docker_build_agent6_jmx:
# build agent6 py2 jmx image
docker_build_agent6_jmx_arm64:
extends: .docker_build_job_definition_arm64
- rules: !reference [.on_all_builds_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: agent_deb-arm64-a6
artifacts: false
@@ -95,7 +103,9 @@ docker_build_agent6_jmx_arm64:
# build agent6 jmx unified image (including python3)
docker_build_agent6_py2py3_jmx:
extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: agent_deb-x64-a6
artifacts: false
@@ -105,71 +115,25 @@ docker_build_agent6_py2py3_jmx:
TAG_SUFFIX: -6-py2py3-jmx
BUILD_ARG: --target test --build-arg WITH_JMX=true --build-arg DD_AGENT_ARTIFACT=datadog-agent_6*_amd64.deb
-# build agent7 image
-docker_build_agent7:
- extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a7]
- needs:
- - job: agent_deb-x64-a7
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/agent
- BUILD_CONTEXT: Dockerfiles/agent
- TAG_SUFFIX: -7
- BUILD_ARG: --target test --build-arg PYTHON_VERSION=3 --build-arg DD_AGENT_ARTIFACT=datadog-agent_7*_amd64.deb
-
-single_machine_performance-amd64-a7:
+single_machine_performance-amd64-a6:
extends: .docker_publish_job_definition
stage: container_build
- rules: !reference [.on_a7]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- - docker_build_agent7
+ - docker_build_agent6
variables:
IMG_REGISTRIES: internal-aws-smp
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64
- IMG_DESTINATIONS: 08450328-agent:${CI_COMMIT_SHA}-7-amd64
-
-docker_build_agent7_arm64:
- extends: .docker_build_job_definition_arm64
- rules: !reference [.on_a7]
- needs:
- - job: agent_deb-arm64-a7
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/agent
- BUILD_CONTEXT: Dockerfiles/agent
- TAG_SUFFIX: -7
- BUILD_ARG: --target test --build-arg PYTHON_VERSION=3 --build-arg DD_AGENT_ARTIFACT=datadog-agent_7*_arm64.deb
-
-# build agent7 jmx image
-docker_build_agent7_jmx:
- extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a7]
- needs:
- - job: agent_deb-x64-a7
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/agent
- BUILD_CONTEXT: Dockerfiles/agent
- TAG_SUFFIX: -7-jmx
- BUILD_ARG: --target test --build-arg WITH_JMX=true --build-arg PYTHON_VERSION=3 --build-arg DD_AGENT_ARTIFACT=datadog-agent_7*_amd64.deb
-
-docker_build_agent7_jmx_arm64:
- extends: .docker_build_job_definition_arm64
- rules: !reference [.on_a7]
- needs:
- - job: agent_deb-arm64-a7
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/agent
- BUILD_CONTEXT: Dockerfiles/agent
- TAG_SUFFIX: -7-jmx
- BUILD_ARG: --target test --build-arg WITH_JMX=true --build-arg PYTHON_VERSION=3 --build-arg DD_AGENT_ARTIFACT=datadog-agent_7*_arm64.deb
+ IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-amd64
+ IMG_DESTINATIONS: 08450328-agent:${CI_COMMIT_SHA}-6-amd64
# build the cluster-agent image
docker_build_cluster_agent_amd64:
extends: .docker_build_job_definition_amd64
- rules: !reference [.on_tag_or_a7]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: cluster_agent-build_amd64
artifacts: false
@@ -181,7 +145,9 @@ docker_build_cluster_agent_amd64:
docker_build_cluster_agent_arm64:
extends: .docker_build_job_definition_arm64
- rules: !reference [.on_tag_or_a7]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: cluster_agent-build_arm64
artifacts: false
@@ -194,7 +160,9 @@ docker_build_cluster_agent_arm64:
# build the cws-instrumentation image
docker_build_cws_instrumentation_amd64:
extends: .docker_build_job_definition_amd64
- rules: !reference [.on_tag_or_a7]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: cws_instrumentation-build_amd64
artifacts: false
@@ -204,32 +172,12 @@ docker_build_cws_instrumentation_amd64:
docker_build_cws_instrumentation_arm64:
extends: .docker_build_job_definition_arm64
- rules: !reference [.on_tag_or_a7]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- job: cws_instrumentation-build_arm64
artifacts: false
variables:
IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/cws-instrumentation
BUILD_CONTEXT: Dockerfiles/cws-instrumentation
-
-# build the dogstatsd image
-docker_build_dogstatsd_amd64:
- extends: .docker_build_job_definition_amd64
- rules: !reference [.on_a7]
- needs:
- - job: build_dogstatsd_static-binary_x64
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/dogstatsd
- BUILD_CONTEXT: Dockerfiles/dogstatsd/alpine
-
-# build the dogstatsd image
-docker_build_dogstatsd_arm64:
- extends: .docker_build_job_definition_arm64
- rules: !reference [.on_a7]
- needs:
- - job: build_dogstatsd_static-binary_arm64
- artifacts: false
- variables:
- IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent/dogstatsd
- BUILD_CONTEXT: Dockerfiles/dogstatsd/alpine
diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml
index 0592b8cf464bb..1696dba6acfea 100644
--- a/.gitlab/container_build/docker_windows.yml
+++ b/.gitlab/container_build/docker_windows.yml
@@ -65,20 +65,12 @@
- docker rmi ${TARGET_TAG}
- If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
-.docker_build_agent7_windows_common:
- extends:
- - .docker_build_agent_windows_common
- rules: !reference [.on_a7]
- needs:
- ["windows_msi_and_bosh_zip_x64-a7", "build_windows_container_entrypoint"]
- variables:
- AGENT_ZIP: "datadog-agent-7*-x86_64.zip"
- BUILD_ARG: "--build-arg BASE_IMAGE=mcr.microsoft.com/powershell:nanoserver-${VARIANT} --build-arg WITH_JMX=${WITH_JMX} --build-arg VARIANT=${VARIANT} --build-arg INSTALL_INFO=nano-${VARIANT}"
-
.docker_build_agent6_windows_common:
extends:
- .docker_build_agent_windows_common
- rules: !reference [.on_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs: ["windows_msi_x64-a6", "build_windows_container_entrypoint"]
variables:
AGENT_ZIP: "datadog-agent-6*-x86_64.zip"
@@ -91,13 +83,5 @@
BUILD_ARG: "--build-arg BASE_IMAGE=mcr.microsoft.com/powershell:windowsservercore-${VARIANT} --build-arg WITH_JMX=${WITH_JMX} --build-arg VARIANT=${VARIANT} --build-arg INSTALL_INFO=core-${VARIANT}"
SERVERCORE: "-servercore"
-.docker_build_agent7_windows_servercore_common:
- extends:
- - .docker_build_agent7_windows_common
- variables:
- BUILD_ARG: "--build-arg BASE_IMAGE=mcr.microsoft.com/powershell:windowsservercore-${VARIANT} --build-arg WITH_JMX=${WITH_JMX} --build-arg VARIANT=${VARIANT} --build-arg INSTALL_INFO=core-${VARIANT}"
- SERVERCORE: "-servercore"
-
include:
- - /.gitlab/container_build/docker_windows_agent6.yml
- - /.gitlab/container_build/docker_windows_agent7.yml
+ - .gitlab/container_build/docker_windows_agent6.yml
diff --git a/.gitlab/container_build/docker_windows_agent7.yml b/.gitlab/container_build/docker_windows_agent7.yml
index e8bf47cbba812..1db375722d948 100644
--- a/.gitlab/container_build/docker_windows_agent7.yml
+++ b/.gitlab/container_build/docker_windows_agent7.yml
@@ -1,74 +1,74 @@
---
-docker_build_agent7_windows1809:
+docker_build_agent6_windows1809:
extends:
- - .docker_build_agent7_windows_common
+ - .docker_build_agent6_windows_common
tags: ["runner:windows-docker", "windowsversion:1809"]
variables:
VARIANT: 1809
- TAG_SUFFIX: -7
+ TAG_SUFFIX: -6
WITH_JMX: "false"
-docker_build_agent7_windows1809_jmx:
+docker_build_agent6_windows1809_jmx:
extends:
- - .docker_build_agent7_windows_common
+ - .docker_build_agent6_windows_common
tags: ["runner:windows-docker", "windowsversion:1809"]
variables:
VARIANT: 1809
- TAG_SUFFIX: -7-jmx
+ TAG_SUFFIX: -6-jmx
WITH_JMX: "true"
-docker_build_agent7_windows2022_jmx:
+docker_build_agent6_windows2022_jmx:
extends:
- - .docker_build_agent7_windows_common
+ - .docker_build_agent6_windows_common
tags: ["runner:windows-docker", "windowsversion:2022"]
- needs: ["windows_msi_and_bosh_zip_x64-a7", "build_windows_container_entrypoint"]
+ needs: ["windows_msi_and_bosh_zip_x64-a6", "build_windows_container_entrypoint"]
variables:
VARIANT: ltsc2022
- TAG_SUFFIX: -7-jmx
+ TAG_SUFFIX: -6-jmx
WITH_JMX: "true"
-docker_build_agent7_windows2022:
+docker_build_agent6_windows2022:
extends:
- - .docker_build_agent7_windows_common
+ - .docker_build_agent6_windows_common
tags: ["runner:windows-docker", "windowsversion:2022"]
variables:
VARIANT: ltsc2022
- TAG_SUFFIX: "-7"
+ TAG_SUFFIX: "-6"
WITH_JMX: "false"
-docker_build_agent7_windows1809_core:
+docker_build_agent6_windows1809_core:
extends:
- - .docker_build_agent7_windows_servercore_common
+ - .docker_build_agent6_windows_servercore_common
tags: ["runner:windows-docker", "windowsversion:1809"]
variables:
VARIANT: 1809
- TAG_SUFFIX: -7
+ TAG_SUFFIX: -6
WITH_JMX: "false"
-docker_build_agent7_windows1809_core_jmx:
+docker_build_agent6_windows1809_core_jmx:
extends:
- - .docker_build_agent7_windows_servercore_common
+ - .docker_build_agent6_windows_servercore_common
tags: ["runner:windows-docker", "windowsversion:1809"]
variables:
VARIANT: 1809
- TAG_SUFFIX: -7-jmx
+ TAG_SUFFIX: -6-jmx
WITH_JMX: "true"
-docker_build_agent7_windows2022_core:
+docker_build_agent6_windows2022_core:
extends:
- - .docker_build_agent7_windows_servercore_common
+ - .docker_build_agent6_windows_servercore_common
tags: ["runner:windows-docker", "windowsversion:2022"]
variables:
VARIANT: ltsc2022
- TAG_SUFFIX: "-7"
+ TAG_SUFFIX: "-6"
WITH_JMX: "false"
-docker_build_agent7_windows2022_core_jmx:
+docker_build_agent6_windows2022_core_jmx:
extends:
- - .docker_build_agent7_windows_servercore_common
+ - .docker_build_agent6_windows_servercore_common
tags: ["runner:windows-docker", "windowsversion:2022"]
- needs: ["windows_msi_and_bosh_zip_x64-a7", "build_windows_container_entrypoint"]
+ needs: ["windows_msi_and_bosh_zip_x64-a6", "build_windows_container_entrypoint"]
variables:
VARIANT: ltsc2022
- TAG_SUFFIX: -7-jmx
+ TAG_SUFFIX: -6-jmx
WITH_JMX: "true"
diff --git a/.gitlab/container_build/include.yml b/.gitlab/container_build/include.yml
index cc3f9e653ffe1..5ea8352b8fc62 100644
--- a/.gitlab/container_build/include.yml
+++ b/.gitlab/container_build/include.yml
@@ -3,6 +3,6 @@
# Contains jobs to build container images of the Agent.
include:
- - /.gitlab/container_build/docker_linux.yml
- - /.gitlab/container_build/docker_windows.yml
- - /.gitlab/container_build/fakeintake.yml
+ - .gitlab/container_build/docker_linux.yml
+ - .gitlab/container_build/docker_windows.yml
+ - .gitlab/container_build/fakeintake.yml
diff --git a/.gitlab/container_scan/container_scan.yml b/.gitlab/container_scan/container_scan.yml
index 464aa047dfe5e..2e63a914420bc 100644
--- a/.gitlab/container_scan/container_scan.yml
+++ b/.gitlab/container_scan/container_scan.yml
@@ -4,23 +4,11 @@
# (datadog/agent-scan and datadog/dogstatsd-scan) to be scanned.
# push every night to docker hub agent-scan repo
-scan_nightly-dogstatsd:
- extends: .docker_publish_job_definition
- stage: container_scan
- rules:
- !reference [.on_deploy_nightly_repo_branch_a7]
- needs:
- - docker_build_dogstatsd_amd64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64
- IMG_DESTINATIONS: dogstatsd-scan:${BUCKET_BRANCH}
-
scan_nightly-a6:
extends: .docker_publish_job_definition
stage: container_scan
rules:
- !reference [.on_deploy_nightly_repo_branch_a6]
+ !reference [.on_deploy_nightly_repo_branch]
needs:
- docker_build_agent6
- docker_build_agent6_jmx
@@ -33,52 +21,22 @@ scan_nightly-a6:
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-amd64
IMG_DESTINATIONS: agent-scan:${BUCKET_BRANCH}-py2-jmx
-scan_nightly-a7:
- extends: .docker_publish_job_definition
- stage: container_scan
- rules:
- !reference [.on_deploy_nightly_repo_branch_a7]
- needs:
- - docker_build_agent7
- - docker_build_agent7_jmx
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64
- IMG_DESTINATIONS: agent-scan:${BUCKET_BRANCH}-py3
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64
- IMG_DESTINATIONS: agent-scan:${BUCKET_BRANCH}-py3-jmx
-
dca_scan_nightly:
extends: .docker_publish_job_definition
stage: container_scan
rules:
- !reference [.on_deploy_nightly_repo_branch_a7]
+ !reference [.on_deploy_nightly_repo_branch]
needs: ["docker_build_cluster_agent_amd64"]
variables:
IMG_REGISTRIES: dev
IMG_SOURCES: ${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64
IMG_DESTINATIONS: cluster-agent-scan:${BUCKET_BRANCH}
-# push on master to docker hub agent-scan repo
-scan_master-dogstatsd:
- extends: .docker_publish_job_definition
- stage: container_scan
- rules:
- !reference [.on_main_a7]
- needs:
- - docker_build_dogstatsd_amd64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64
- IMG_DESTINATIONS: dogstatsd-scan:master
-
scan_master-a6:
extends: .docker_publish_job_definition
stage: container_scan
rules:
- !reference [.on_main_a6]
+ !reference [.on_main]
needs:
- docker_build_agent6
- docker_build_agent6_jmx
@@ -91,28 +49,11 @@ scan_master-a6:
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-amd64
IMG_DESTINATIONS: agent-scan:master-py2-jmx
-scan_master-a7:
- extends: .docker_publish_job_definition
- stage: container_scan
- rules:
- !reference [.on_main_a7]
- needs:
- - docker_build_agent7
- - docker_build_agent7_jmx
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64
- IMG_DESTINATIONS: agent-scan:master-py3
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64
- IMG_DESTINATIONS: agent-scan:master-py3-jmx
-
dca_scan_master:
extends: .docker_publish_job_definition
stage: container_scan
rules:
- !reference [.on_main_a7]
+ !reference [.on_main]
needs: ["docker_build_cluster_agent_amd64"]
variables:
IMG_REGISTRIES: dev
diff --git a/.gitlab/deploy_containers/deploy_containers.yml b/.gitlab/deploy_containers/deploy_containers.yml
index 2b5064acf9338..d866827017b65 100644
--- a/.gitlab/deploy_containers/deploy_containers.yml
+++ b/.gitlab/deploy_containers/deploy_containers.yml
@@ -1,6 +1,6 @@
---
# deploy containers stage
-# Contains jobs which create child pipelines to deploy Agent 6 & 7 to staging repositories and to Dockerhub / GCR.
+# Contains jobs which create child pipelines to deploy Agent 6 to staging repositories and to Dockerhub / GCR.
#
# Agent v6
@@ -9,7 +9,7 @@
deploy_containers-a6:
stage: deploy_containers
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
BUCKET_BRANCH: $BUCKET_BRANCH
@@ -19,33 +19,10 @@ deploy_containers-a6:
deploy_containers-a6-on-failure:
stage: deploy_containers
rules:
- !reference [.on_deploy_a6_failure]
+ !reference [.on_deploy_failure]
variables:
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
BUCKET_BRANCH: $BUCKET_BRANCH
FORCE_MANUAL: "true"
trigger:
include: .gitlab/deploy_containers/deploy_containers_a6.yml
-
-
-deploy_containers-a7:
- stage: deploy_containers
- rules:
- !reference [.on_deploy_a7]
- variables:
- PARENT_PIPELINE_ID: $CI_PIPELINE_ID
- BUCKET_BRANCH: $BUCKET_BRANCH
- trigger:
- include: .gitlab/deploy_containers/deploy_containers_a7.yml
-
-
-deploy_containers-a7-on-failure:
- stage: deploy_containers
- rules:
- !reference [.on_deploy_a7_failure]
- variables:
- PARENT_PIPELINE_ID: $CI_PIPELINE_ID
- BUCKET_BRANCH: $BUCKET_BRANCH
- FORCE_MANUAL: "true"
- trigger:
- include: .gitlab/deploy_containers/deploy_containers_a7.yml
diff --git a/.gitlab/deploy_containers/deploy_containers_a6.yml b/.gitlab/deploy_containers/deploy_containers_a6.yml
index d21734d14b596..00054abf91255 100644
--- a/.gitlab/deploy_containers/deploy_containers_a6.yml
+++ b/.gitlab/deploy_containers/deploy_containers_a6.yml
@@ -1,13 +1,13 @@
---
# deploy containers stage
-# Contains jobs which deploy Agent 6 & 7 to staging repositories and to Dockerhub / GCR.
+# Contains jobs which deploy Agent 6 to staging repositories and to Dockerhub / GCR.
stages:
- deploy_containers
include:
- - /.gitlab/common/container_publish_job_templates.yml
- - /.gitlab/deploy_containers/conditions.yml
+ - .gitlab/common/container_publish_job_templates.yml
+ - .gitlab/deploy_containers/conditions.yml
#
# Image tagging & manifest publication
diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml
index bd0baf146762e..c7ef9ad0f4b11 100644
--- a/.gitlab/deploy_containers/deploy_containers_a7.yml
+++ b/.gitlab/deploy_containers/deploy_containers_a7.yml
@@ -1,36 +1,36 @@
---
# deploy containers stage
-# Contains jobs which deploy Agent 6 & 7 to staging repositories and to Dockerhub / GCR.
+# Contains jobs which deploy Agent 6 to staging repositories and to Dockerhub / GCR.
stages:
- deploy_containers
include:
- - /.gitlab/common/container_publish_job_templates.yml
- - /.gitlab/deploy_containers/conditions.yml
+ - .gitlab/common/container_publish_job_templates.yml
+ - .gitlab/deploy_containers/conditions.yml
#
# Image tagging & manifest publication
#
#
-# Agent v7
+# Agent v6
#
-.deploy_containers-a7-base:
+.deploy_containers-a6-base:
extends: .docker_publish_job_definition
stage: deploy_containers
dependencies: []
before_script:
- source /root/.bashrc
- - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi
+ - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 6 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi
- export IMG_BASE_SRC="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
- - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64"
- - export IMG_WINDOWS_SOURCES="${IMG_BASE_SRC}-7${JMX}-win1809${FLAVOR}-amd64,${IMG_BASE_SRC}-7${JMX}-winltsc2022${FLAVOR}-amd64"
+ - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-6${JMX}-amd64,${IMG_BASE_SRC}-6${JMX}-arm64"
+ - export IMG_WINDOWS_SOURCES="${IMG_BASE_SRC}-6${JMX}-win1809${FLAVOR}-amd64,${IMG_BASE_SRC}-6${JMX}-winltsc2022${FLAVOR}-amd64"
- if [[ "$FLAVOR" == "-linux" ]]; then export IMG_SOURCES="${IMG_LINUX_SOURCES}"; elif [[ "$FLAVOR" == "-servercore" ]]; then export IMG_SOURCES="${IMG_WINDOWS_SOURCES}"; else export IMG_SOURCES="${IMG_LINUX_SOURCES},${IMG_WINDOWS_SOURCES}"; fi
- export IMG_DESTINATIONS="${AGENT_REPOSITORY}:${VERSION}${FLAVOR}${JMX}"
-.deploy_containers-a7_external:
- extends: .deploy_containers-a7-base
+.deploy_containers-a6_external:
+ extends: .deploy_containers-a6-base
parallel:
matrix:
- JMX:
@@ -42,29 +42,17 @@ include:
- "-linux"
-deploy_containers-a7:
- extends: .deploy_containers-a7_external
+deploy_containers-a6:
+ extends: .deploy_containers-a6_external
rules:
!reference [.manual_on_deploy_auto_on_rc]
-deploy_containers-a7-rc:
- extends: .deploy_containers-a7_external
+deploy_containers-a6-rc:
+ extends: .deploy_containers-a6_external
rules:
!reference [.on_rc]
variables:
- VERSION: 7-rc
-
-deploy_containers-dogstatsd:
- extends: .docker_publish_job_definition
- stage: deploy_containers
- rules:
- !reference [.manual_on_deploy_auto_on_rc]
- dependencies: []
- before_script:
- - source /root/.bashrc
- - export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"
- - export IMG_SOURCES="${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64"
- - export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}"
+ VERSION: 6-rc
deploy_containers-a7_internal:
extends: .deploy_containers-a7-base
@@ -74,19 +62,19 @@ deploy_containers-a7_internal:
JMX: "-jmx"
-deploy_containers-a7_internal-rc:
- extends: .deploy_containers-a7-base
+deploy_containers-a6_internal-rc:
+ extends: .deploy_containers-a6-base
rules:
!reference [.on_internal_rc]
variables:
- VERSION: 7-rc
+ VERSION: 6-rc
#
# Latest publication
#
-deploy_containers_latest-a7:
+deploy_containers_latest-a6:
extends: .docker_publish_job_definition
stage: deploy_containers
rules:
@@ -94,21 +82,21 @@ deploy_containers_latest-a7:
dependencies: []
parallel:
matrix:
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
+ - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6"
IMG_SOURCES: "%BASE%-amd64,%BASE%-arm64,%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7,${AGENT_REPOSITORY}:latest
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
+ IMG_DESTINATIONS: ${AGENT_REPOSITORY}:6,${AGENT_REPOSITORY}:latest
+ - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx"
IMG_SOURCES: "%BASE%-amd64,%BASE%-arm64,%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-jmx,${AGENT_REPOSITORY}:latest-jmx
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
+ IMG_DESTINATIONS: ${AGENT_REPOSITORY}:6-jmx,${AGENT_REPOSITORY}:latest-jmx
+ - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6"
IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore,${AGENT_REPOSITORY}:latest-servercore
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
+ IMG_DESTINATIONS: ${AGENT_REPOSITORY}:6-servercore,${AGENT_REPOSITORY}:latest-servercore
+ - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx"
IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-jmx,${AGENT_REPOSITORY}:latest-servercore-jmx
+ IMG_DESTINATIONS: ${AGENT_REPOSITORY}:6-servercore-jmx,${AGENT_REPOSITORY}:latest-servercore-jmx
-deploy_containers_latest-a7_internal:
+deploy_containers_latest-a6_internal:
extends: .docker_publish_job_definition
stage: deploy_containers
rules:
@@ -116,17 +104,6 @@ deploy_containers_latest-a7_internal:
dependencies: []
parallel:
matrix:
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
+ - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx"
IMG_SOURCES: "%BASE%-amd64,%BASE%-arm64,%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-jmx
-
-
-deploy_containers_latest-dogstatsd:
- extends: .docker_publish_job_definition
- stage: deploy_containers
- rules:
- !reference [.on_final]
- dependencies: []
- variables:
- IMG_SOURCES: ${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: ${DSD_REPOSITORY}:7,${DSD_REPOSITORY}:latest
+ IMG_DESTINATIONS: ${AGENT_REPOSITORY}:6-jmx
diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
index 6bd710bc72e78..7edd6362359d6 100644
--- a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
+++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml
@@ -1,6 +1,6 @@
---
include:
- - /.gitlab/common/container_publish_job_templates.yml
+ - .gitlab/common/container_publish_job_templates.yml
#
# CWS Instrumentation image tagging & manifest publication
@@ -12,32 +12,32 @@ include:
dependencies: []
before_script:
- source /root/.bashrc
- - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi
+ - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 6 --url-safe)"; fi
- if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi
- export IMG_BASE_SRC="${SRC_CWS_INSTRUMENTATION}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
- export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64"
- export IMG_DESTINATIONS="${CWS_INSTRUMENTATION_REPOSITORY}:${VERSION}"
-# will push the `7.xx.y-rc.z` tags
+# will push the `6.xx.y-rc.z` tags
deploy_containers-cws-instrumentation-rc-versioned:
extends: .deploy_containers-cws-instrumentation-base
- rules: !reference [.on_deploy_a7_rc]
+ rules: !reference [.on_deploy_rc]
# will update the `rc` tag
deploy_containers-cws-instrumentation-rc-mutable:
extends: .deploy_containers-cws-instrumentation-base
- rules: !reference [.on_deploy_a7_rc]
+ rules: !reference [.on_deploy_rc]
variables:
VERSION: rc
-# will push the `7.xx.y` tags
+# will push the `6.xx.y` tags
deploy_containers-cws-instrumentation-final-versioned:
extends: .deploy_containers-cws-instrumentation-base
- rules: !reference [.on_deploy_a7_manual_final]
+ rules: !reference [.on_deploy_manual_final]
# will update the `latest` tag
deploy_containers-cws-instrumentation-latest:
extends: .deploy_containers-cws-instrumentation-base
- rules: !reference [.on_deploy_a7_manual_final]
+ rules: !reference [.on_deploy_manual_final]
variables:
VERSION: latest
diff --git a/.gitlab/deploy_dca/deploy_dca.yml b/.gitlab/deploy_dca/deploy_dca.yml
index c1617f8ffb84c..66b24807c50a9 100644
--- a/.gitlab/deploy_dca/deploy_dca.yml
+++ b/.gitlab/deploy_dca/deploy_dca.yml
@@ -1,6 +1,6 @@
---
include:
- - /.gitlab/common/container_publish_job_templates.yml
+ - .gitlab/common/container_publish_job_templates.yml
#
# DCA image tagging & manifest publication
@@ -16,7 +16,7 @@ include:
artifacts: false
before_script:
- source /root/.bashrc
- - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi
+ - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 6 --url-safe)"; fi
- if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi
- export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
- export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64"
@@ -24,32 +24,32 @@ include:
deploy_containers-dca:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_manual_auto_on_rc]
+ rules: !reference [.on_deploy_manual_auto_on_rc]
deploy_containers-dca-rc:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_rc]
+ rules: !reference [.on_deploy_rc]
variables:
VERSION: rc
deploy_containers-dca-latest:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_manual_final]
+ rules: !reference [.on_deploy_manual_final]
variables:
VERSION: latest
deploy_containers-dca_internal:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_internal_manual_final]
+ rules: !reference [.on_deploy_internal_manual_final]
deploy_containers-dca_internal-rc:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_internal_rc]
+ rules: !reference [.on_deploy_internal_rc]
variables:
VERSION: rc
deploy_containers-dca_internal-latest:
extends: .deploy_containers-dca-base
- rules: !reference [.on_deploy_a7_internal_manual_final]
+ rules: !reference [.on_deploy_internal_manual_final]
variables:
VERSION: latest
diff --git a/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml b/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml
index 1c965a021d7de..0956683d8b02a 100644
--- a/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml
+++ b/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml
@@ -1,7 +1,7 @@
---
deploy_cluster_agent_cloudfoundry:
rules:
- !reference [.on_deploy_a7]
+ !reference [.on_deploy]
stage: deploy_packages
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
diff --git a/.gitlab/deploy_packages/deploy_common.yml b/.gitlab/deploy_packages/deploy_common.yml
index 09ee6b9210a09..b888e44a2383c 100644
--- a/.gitlab/deploy_packages/deploy_common.yml
+++ b/.gitlab/deploy_packages/deploy_common.yml
@@ -12,18 +12,10 @@
extends: .deploy_packages_deb
stage: deploy_packages
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
MAJOR_VERSION: 6
-.deploy_packages_deb-7:
- extends: .deploy_packages_deb
- stage: deploy_packages
- rules:
- !reference [.on_deploy_a7]
- variables:
- MAJOR_VERSION: 7
-
.deploy_packages_rpm:
resource_group: rpm_bucket
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
@@ -39,18 +31,10 @@
extends: .deploy_packages_rpm
stage: deploy_packages
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
MAJOR_VERSION: 6
-.deploy_packages_rpm-7:
- extends: .deploy_packages_rpm
- stage: deploy_packages
- rules:
- !reference [.on_deploy_a7]
- variables:
- MAJOR_VERSION: 7
-
.deploy_packages_suse_rpm:
extends: .deploy_packages_rpm
variables:
@@ -61,40 +45,6 @@
extends: .deploy_packages_suse_rpm
stage: deploy_packages
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
MAJOR_VERSION: 6
-
-.deploy_packages_suse_rpm-7:
- extends: .deploy_packages_suse_rpm
- stage: deploy_packages
- rules:
- !reference [.on_deploy_a7]
- variables:
- MAJOR_VERSION: 7
-
-deploy_packages_oci:
- resource_group: oci_bucket
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- needs: [ "packaging_oci" ]
- tags: ["arch:amd64"]
- stage: deploy_packages
- before_script:
- - ls $OMNIBUS_PACKAGE_DIR
- rules:
- !reference [.on_deploy_a7]
- script:
- - python3 -m pip install -r tasks/libs/requirements-github.txt
- - set +x
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh ci.datadog-agent.platform-github-app-key)
- - export GITHUB_APP_ID=682216
- - export GH_TOKEN=$(inv -e github.get-token-from-app --app-id-env=GITHUB_APP_ID --pkey-env=GITHUB_KEY_B64)
- - export VERSION=$(inv agent.version --url-safe)-1
- - git config --global credential.helper '!f() { echo username=x-access-token; echo "password=$GH_TOKEN"; };f'
- - git clone https://github.com/DataDog/datadog-packages /tmp/datadog-packages
- - cd /tmp/datadog-packages/cmd/datadog-package
- - go build .
- - ./datadog-package push registry.ddbuild.io/ci/remote-updates/datadog-agent:${VERSION} ${OMNIBUS_PACKAGE_DIR}/datadog-agent-${MAJOR_VERSION}.*.oci.tar
- variables:
- MAJOR_VERSION: 7
-
diff --git a/.gitlab/deploy_packages/include.yml b/.gitlab/deploy_packages/include.yml
index 1e0f87c545751..b430387982f71 100644
--- a/.gitlab/deploy_packages/include.yml
+++ b/.gitlab/deploy_packages/include.yml
@@ -1,12 +1,11 @@
---
# deploy_packages stage
-# Contains jobs which deploy Agent 6 & 7 to staging repositories.
+# Contains jobs which deploy Agent 6 to staging repositories.
# Jobs are expected to depend on the underlying build job and
# start as soon as possible.
include:
- - /.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml
- - /.gitlab/deploy_packages/deploy_common.yml
- - /.gitlab/deploy_packages/nix.yml
- - /.gitlab/deploy_packages/windows.yml
- - /.gitlab/deploy_packages/winget.yml
+ - .gitlab/deploy_packages/cluster_agent_cloudfoundry.yml
+ - .gitlab/deploy_packages/deploy_common.yml
+ - .gitlab/deploy_packages/nix.yml
+ - .gitlab/deploy_packages/windows.yml
diff --git a/.gitlab/deploy_packages/nix.yml b/.gitlab/deploy_packages/nix.yml
index 2d5215c35b53c..3c414a0202593 100644
--- a/.gitlab/deploy_packages/nix.yml
+++ b/.gitlab/deploy_packages/nix.yml
@@ -39,195 +39,8 @@ deploy_packages_suse_rpm-x64-6:
variables:
PACKAGE_ARCH: x86_64
-#
-# Agent v7
-#
-deploy_packages_deb-x64-7:
- extends: .deploy_packages_deb-7
- needs: [ agent_deb-x64-a7 ]
- variables:
- PACKAGE_ARCH: amd64
-
-deploy_packages_deb-arm64-7:
- extends: .deploy_packages_deb-7
- needs: [ agent_deb-arm64-a7 ]
- variables:
- PACKAGE_ARCH: arm64
-
-deploy_packages_heroku_deb-x64-7:
- extends: .deploy_packages_deb-7
- needs: [ agent_heroku_deb-x64-a7 ]
- variables:
- PACKAGE_ARCH: amd64
-
-deploy_packages_iot_deb-x64-7:
- extends: .deploy_packages_deb-7
- needs: [ iot_agent_deb-x64 ]
- variables:
- PACKAGE_ARCH: amd64
-
-deploy_packages_iot_deb-arm64-7:
- extends: .deploy_packages_deb-7
- needs: [ iot_agent_deb-arm64 ]
- variables:
- PACKAGE_ARCH: arm64
-
-deploy_packages_iot_deb-armhf-7:
- extends: .deploy_packages_deb-7
- needs: [ iot_agent_deb-armhf ]
- variables:
- PACKAGE_ARCH: armhf
-
-deploy_packages_dogstatsd_deb-x64-7:
- extends: .deploy_packages_deb-7
- needs: [ dogstatsd_deb-x64 ]
- variables:
- PACKAGE_ARCH: amd64
-
-deploy_packages_dogstatsd_deb-arm64-7:
- extends: .deploy_packages_deb-7
- needs: [ dogstatsd_deb-arm64 ]
- variables:
- PACKAGE_ARCH: arm64
-
-deploy_packages_agentless_scanner_deb-x64-7:
- extends: .deploy_packages_deb-7
- needs: [ agentless_scanner_deb-x64 ]
- variables:
- PACKAGE_ARCH: amd64
-
-deploy_packages_agentless_scanner_deb-arm64-7:
- extends: .deploy_packages_deb-7
- needs: [ agentless_scanner_deb-arm64 ]
- variables:
- PACKAGE_ARCH: arm64
-
-deploy_packages_rpm-x64-7:
- extends: .deploy_packages_rpm-7
- needs: [ agent_rpm-x64-a7 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_rpm-arm64-7:
- extends: .deploy_packages_rpm-7
- needs: [ agent_rpm-arm64-a7 ]
- variables:
- PACKAGE_ARCH: aarch64
-
-deploy_packages_iot_rpm-x64-7:
- extends: .deploy_packages_rpm-7
- needs: [ iot_agent_rpm-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_iot_rpm-arm64-7:
- extends: .deploy_packages_rpm-7
- needs: [ iot_agent_rpm-arm64 ]
- variables:
- PACKAGE_ARCH: aarch64
-
-deploy_packages_iot_rpm-armhf-7:
- extends: .deploy_packages_rpm-7
- needs: [ iot_agent_rpm-armhf ]
- variables:
- PACKAGE_ARCH: armv7hl
-
-deploy_packages_dogstatsd_rpm-x64-7:
- extends: .deploy_packages_rpm-7
- needs: [ dogstatsd_rpm-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_agentless_scanner_rpm-x64-7:
- extends: .deploy_packages_rpm-7
- needs: [ agentless_scanner_rpm-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_agentless_scanner_rpm-arm64-7:
- extends: .deploy_packages_rpm-7
- needs: [ agentless_scanner_rpm-arm64 ]
- variables:
- PACKAGE_ARCH: aarch64
-
-deploy_packages_suse_rpm-x64-7:
- extends: .deploy_packages_suse_rpm-7
- needs: [ agent_suse-x64-a7 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_suse_rpm-arm64-7:
- extends: .deploy_packages_suse_rpm-7
- needs: [ agent_suse-arm64-a7 ]
+deploy_packages_suse_rpm-arm64-6:
+ extends: .deploy_packages_suse_rpm-6
+ needs: [ agent_suse-arm64-a6 ]
variables:
PACKAGE_ARCH: aarch64
-
-deploy_packages_iot_suse_rpm-x64-7:
- extends: .deploy_packages_suse_rpm-7
- needs: [ iot_agent_suse-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_dogstatsd_suse_rpm-x64-7:
- extends: .deploy_packages_suse_rpm-7
- needs: [ dogstatsd_suse-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_agentless_scanner_suse_rpm-x64-7:
- extends: .deploy_packages_suse_rpm-7
- needs: [ agentless_scanner_suse-x64 ]
- variables:
- PACKAGE_ARCH: x86_64
-
-deploy_packages_dmg-x64-a7:
- rules:
- !reference [.on_deploy_a7]
- stage: deploy_packages
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs: ["agent_dmg-x64-a7"]
- before_script:
- - ls $OMNIBUS_PACKAGE_DIR
- script:
- - $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-7*.dmg" $OMNIBUS_PACKAGE_DIR $S3_RELEASE_ARTIFACTS_URI/dmg/x86_64/ || true
-
-# deploy dogstatsd x64, non-static binary to staging bucket
-deploy_staging_dsd:
- rules:
- !reference [.on_deploy_a7]
- stage: deploy_packages
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs: ["build_dogstatsd-binary_x64"]
- script:
- - $S3_CP_CMD $S3_ARTIFACTS_URI/dogstatsd/dogstatsd ./dogstatsd
- - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)
- - $S3_CP_CMD ./dogstatsd $S3_DSD6_URI/linux/dogstatsd-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
-
-# deploy iot-agent x64 binary to staging bucket
-deploy_staging_iot_agent:
- rules:
- !reference [.on_deploy_a7]
- stage: deploy_packages
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs: ["build_iot_agent-binary_x64"]
- script:
- - $S3_CP_CMD $S3_ARTIFACTS_URI/iot/agent ./agent
- - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)
- - $S3_CP_CMD ./agent $S3_DSD6_URI/linux/iot/agent-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
-
-# Remote Updater
-deploy_remote_updater_deb_arm64:
- extends: .deploy_packages_deb-7
- needs: ["updater_deb-arm64"]
- variables:
- PACKAGE_ARCH: arm64
-
-deploy_remote_updater_deb_amd64:
- extends: .deploy_packages_deb-7
- needs: ["updater_deb-amd64"]
- variables:
- PACKAGE_ARCH: amd64
-
diff --git a/.gitlab/deploy_packages/windows.yml b/.gitlab/deploy_packages/windows.yml
index 4733429c08f36..a070398ff946f 100644
--- a/.gitlab/deploy_packages/windows.yml
+++ b/.gitlab/deploy_packages/windows.yml
@@ -4,7 +4,7 @@
#
deploy_packages_windows-x64-6:
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
stage: deploy_packages
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
@@ -19,33 +19,13 @@ deploy_packages_windows-x64-6:
--include "datadog-agent-6*.debug.zip"
$OMNIBUS_PACKAGE_DIR $S3_RELEASE_ARTIFACTS_URI/msi/x86_64/ || true
-#
-# Agent v7
-#
-deploy_packages_windows-x64-7:
- rules:
- !reference [.on_deploy_a7]
- stage: deploy_packages
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs: ["windows_msi_and_bosh_zip_x64-a7"]
- before_script:
- - ls $OMNIBUS_PACKAGE_DIR
- script:
- - $S3_CP_CMD
- --recursive
- --exclude "*"
- --include "datadog-agent-7*.msi"
- --include "datadog-agent-7*.debug.zip"
- $OMNIBUS_PACKAGE_DIR $S3_RELEASE_ARTIFACTS_URI/msi/x86_64/ || true
-
-deploy_staging_windows_tags-7:
+deploy_staging_windows_tags-6:
rules:
- !reference [.on_deploy_stable_or_beta_repo_branch_a7]
+ !reference [.on_deploy_stable_or_beta_repo_branch]
stage: deploy_packages
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
- needs: ["windows_msi_and_bosh_zip_x64-a7", "windows_zip_agent_binaries_x64-a7"]
+ needs: ["windows_msi_and_bosh_zip_x64-a6", "windows_zip_agent_binaries_x64-a6"]
before_script:
- ls $OMNIBUS_PACKAGE_DIR
script:
@@ -53,17 +33,17 @@ deploy_staging_windows_tags-7:
- $S3_CP_CMD
--recursive
--exclude "*"
- --include "datadog-agent-7.*.zip"
+ --include "datadog-agent-6.*.zip"
$OMNIBUS_PACKAGE_DIR
- $S3_DSD6_URI/windows/agent7/bosh/
+ $S3_DSD6_URI/windows/agent6/bosh/
--grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers
full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
# used for cloudfoundry buildpack and azure-app-services
- $S3_CP_CMD
--recursive
--exclude "*"
- --include "agent-binaries-7.*.zip"
- $OMNIBUS_PACKAGE_DIR $S3_DSD6_URI/windows/agent7/buildpack/
+ --include "agent-binaries-6.*.zip"
+ $OMNIBUS_PACKAGE_DIR $S3_DSD6_URI/windows/agent6/buildpack/
--grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers
full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml
deleted file mode 100644
index d2708608510a8..0000000000000
--- a/.gitlab/deploy_packages/winget.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# deploy_packages stage
-# Contains a job which deploys the Winget Agent package.
-
-publish_winget_7_x64:
- dependencies: []
- rules: !reference [.on_deploy_stable_or_beta_repo_branch_a7_manual]
- stage: deploy_packages
- tags: ["runner:windows-docker", "windowsversion:1809"]
- variables:
- ARCH: "x64"
- before_script:
- - $wingetPat = (aws ssm get-parameter --region us-east-1 --name $WINGET_PAT_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- script:
- - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
- - $ErrorActionPreference = "Stop"
- - >
- docker run --rm
- -v "$(Get-Location):c:\mnt"
- -e WINGET_GITHUB_ACCESS_TOKEN=${wingetPat}
- -e GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL}
- 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES}
- Powershell
- -C "C:\mnt\tasks\winbuildscripts\Update-Winget.ps1"
- - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
diff --git a/.gitlab/dev_container_deploy/docker_linux.yml b/.gitlab/dev_container_deploy/docker_linux.yml
index 17c9a96f50677..6ce46a2053298 100644
--- a/.gitlab/dev_container_deploy/docker_linux.yml
+++ b/.gitlab/dev_container_deploy/docker_linux.yml
@@ -1,11 +1,11 @@
---
include:
- - /.gitlab/common/container_publish_job_templates.yml
+ - .gitlab/common/container_publish_job_templates.yml
dev_branch-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_a6_manual]
+ rules: !reference [.manual]
needs:
- docker_build_agent6
- docker_build_agent6_jmx
@@ -21,22 +21,10 @@ dev_branch-a6:
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-py2py3-jmx-amd64
IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py2py3-jmx
-dev_branch-dogstatsd:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_a7_manual]
- needs:
- - docker_build_dogstatsd_amd64
- - docker_build_dogstatsd_arm64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: dogstatsd-dev:${CI_COMMIT_REF_SLUG}
-
dev_branch_multiarch-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_all_builds_a6_manual]
+ rules: !reference [.manual]
needs:
- docker_build_agent6
- docker_build_agent6_arm64
@@ -54,40 +42,10 @@ dev_branch_multiarch-a6:
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-py2py3-jmx-amd64
IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py2py3-jmx
-dev_branch_multiarch-a7:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_a7_manual]
- needs:
- - docker_build_agent7
- - docker_build_agent7_arm64
- - docker_build_agent7_jmx
- - docker_build_agent7_jmx_arm64
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-arm64
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-arm64
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-jmx
-
-dev_branch_multiarch-dogstatsd:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_a7_manual]
- needs:
- - docker_build_dogstatsd_amd64
- - docker_build_dogstatsd_arm64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: dogstatsd-dev:${CI_COMMIT_REF_SLUG}
-
dev_master-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_main_a6]
+ rules: !reference [.on_main]
needs:
- docker_build_agent6
- docker_build_agent6_arm64
@@ -103,40 +61,10 @@ dev_master-a6:
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-arm64
IMG_DESTINATIONS: agent-dev:master-jmx,agent-dev:master-py2-jmx
-dev_master-a7:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_main_a7]
- needs:
- - docker_build_agent7
- - docker_build_agent7_arm64
- - docker_build_agent7_jmx
- - docker_build_agent7_jmx_arm64
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-arm64
- IMG_DESTINATIONS: agent-dev:master-py3
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-arm64
- IMG_DESTINATIONS: agent-dev:master-py3-jmx
-
-dev_master-dogstatsd:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_main_a7]
- needs:
- - docker_build_dogstatsd_amd64
- - docker_build_dogstatsd_arm64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: dogstatsd-dev:master
-
dca_dev_branch:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_a7_manual]
+ rules: !reference [.manual]
needs:
- docker_build_cluster_agent_amd64
variables:
@@ -147,7 +75,7 @@ dca_dev_branch:
dca_dev_branch_multiarch:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_all_builds_a7_manual]
+ rules: !reference [.on_all_builds_manual]
needs:
- docker_build_cluster_agent_amd64
- docker_build_cluster_agent_arm64
@@ -159,7 +87,7 @@ dca_dev_branch_multiarch:
dca_dev_master:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_main_a7]
+ rules: !reference [.on_main]
needs:
- docker_build_cluster_agent_amd64
variables:
@@ -170,7 +98,7 @@ dca_dev_master:
cws_instrumentation_dev_branch_multiarch:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_all_builds_a7_manual]
+ rules: !reference [.on_all_builds_manual]
needs:
- docker_build_cws_instrumentation_amd64
- docker_build_cws_instrumentation_arm64
@@ -183,7 +111,7 @@ cws_instrumentation_dev_branch_multiarch:
dev_nightly_docker_hub-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_deploy_nightly_repo_branch_a6]
+ rules: !reference [.on_deploy_nightly_repo_branch]
needs:
- docker_build_agent6
- docker_build_agent6_arm64
@@ -200,48 +128,35 @@ dev_nightly_docker_hub-a6:
IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}-jmx,agent-dev:nightly-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}-py2-jmx,agent-dev:nightly-${CI_COMMIT_REF_SLUG}-py2-jmx
# deploys nightlies to agent-dev
-dev_nightly-a7:
+dev_nightly-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
- rules: !reference [.on_deploy_nightly_repo_branch_a7]
+ rules: !reference [.on_deploy_nightly_repo_branch]
needs:
- - docker_build_agent7
- - docker_build_agent7_arm64
- - docker_build_agent7_jmx
- - docker_build_agent7_jmx_arm64
+ - docker_build_agent6
+ - docker_build_agent6_arm64
+ - docker_build_agent6_jmx
+ - docker_build_agent6_jmx_arm64
variables:
IMG_REGISTRIES: dev
parallel:
matrix:
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-arm64
+ - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-arm64
IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}-py3,agent-dev:nightly-${CI_COMMIT_REF_SLUG}-py3
- - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-arm64
+ - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx-arm64
IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}-py3-jmx,agent-dev:nightly-${CI_COMMIT_REF_SLUG}-py3-jmx
# deploy nightly build to single-machine-performance
-single_machine_performance-nightly-amd64-a7:
+single_machine_performance-nightly-amd64-a6:
extends: .docker_publish_job_definition
stage: dev_container_deploy
rules: !reference [.on_scheduled_main]
needs:
- - docker_build_agent7
+ - docker_build_agent6
variables:
IMG_REGISTRIES: internal-aws-smp
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64
- IMG_DESTINATIONS: 08450328-agent:nightly-${CI_COMMIT_BRANCH}-${CI_COMMIT_SHA}-7-amd64
-
-# deploys nightlies to agent-dev
-dev_nightly-dogstatsd:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules: !reference [.on_deploy_nightly_repo_branch_a7]
- needs:
- - docker_build_dogstatsd_amd64
- - docker_build_dogstatsd_arm64
- variables:
- IMG_REGISTRIES: dev
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: dogstatsd-dev:nightly-${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA},dogstatsd-dev:nightly-${CI_COMMIT_REF_SLUG}
+ IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-amd64
+ IMG_DESTINATIONS: 08450328-agent:nightly-${CI_COMMIT_BRANCH}-${CI_COMMIT_SHA}-6-amd64
# push images to `datadog-agent-qa` ECR for the end-to-end tests defined in `e2e.yml`
qa_agent:
@@ -251,13 +166,13 @@ qa_agent:
- !reference [.on_container_or_e2e_changes_or_manual]
- !reference [.on_apm_or_e2e_changes_or_manual]
needs:
- - docker_build_agent7
- - docker_build_agent7_arm64
- - docker_build_agent7_windows1809
- - docker_build_agent7_windows2022
+ - docker_build_agent6
+ - docker_build_agent6_arm64
+ - docker_build_agent6_windows1809_core
+ - docker_build_agent6_windows2022_core
variables:
IMG_REGISTRIES: agent-qa
- IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-arm64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-win1809-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-winltsc2022-amd64
+ IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-arm64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-win1809-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-winltsc2022-amd64
IMG_DESTINATIONS: agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}
qa_dca:
@@ -273,19 +188,6 @@ qa_dca:
IMG_SOURCES: ${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
IMG_DESTINATIONS: cluster-agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}
-qa_dogstatsd:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules:
- !reference [.on_container_or_e2e_changes_or_manual]
- needs:
- - docker_build_dogstatsd_amd64
- - docker_build_dogstatsd_arm64
- variables:
- IMG_REGISTRIES: agent-qa
- IMG_SOURCES: ${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64
- IMG_DESTINATIONS: dogstatsd:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}
-
.qa_cws_instrumentation:
extends: .docker_publish_job_definition
stage: dev_container_deploy
diff --git a/.gitlab/dev_container_deploy/docker_windows.yml b/.gitlab/dev_container_deploy/docker_windows.yml
index 2c2f1ca1e27ce..b48414d689ea8 100644
--- a/.gitlab/dev_container_deploy/docker_windows.yml
+++ b/.gitlab/dev_container_deploy/docker_windows.yml
@@ -1,43 +1,12 @@
---
include:
- - /.gitlab/common/container_publish_job_templates.yml
-
-dev_branch-a7-windows:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules:
- !reference [.on_a7_manual]
- needs:
- - docker_build_agent7_windows1809
- - docker_build_agent7_windows1809_jmx
- - docker_build_agent7_windows1809_core
- - docker_build_agent7_windows1809_core_jmx
- - docker_build_agent7_windows2022
- - docker_build_agent7_windows2022_jmx
- - docker_build_agent7_windows2022_core
- - docker_build_agent7_windows2022_core_jmx
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-jmx-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-win-servercore
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-jmx-win-servercore
+ - .gitlab/common/container_publish_job_templates.yml
dev_branch-a6-windows:
extends: .docker_publish_job_definition
stage: dev_container_deploy
rules:
- !reference [.on_a6_manual]
+ !reference [.manual]
needs:
- docker_build_agent6_windows1809_core
- docker_build_agent6_windows2022_core
@@ -49,42 +18,11 @@ dev_branch-a6-windows:
IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py2-win-servercore
-dev_master-a7-windows:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules:
- !reference [.on_main_a7]
- needs:
- - docker_build_agent7_windows1809
- - docker_build_agent7_windows1809_jmx
- - docker_build_agent7_windows1809_core
- - docker_build_agent7_windows1809_core_jmx
- - docker_build_agent7_windows2022
- - docker_build_agent7_windows2022_jmx
- - docker_build_agent7_windows2022_core
- - docker_build_agent7_windows2022_core_jmx
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:master-py3-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:master-py3-jmx-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:master-py3-win-servercore
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:master-py3-jmx-win-servercore
-
dev_master-a6-windows:
extends: .docker_publish_job_definition
stage: dev_container_deploy
rules:
- !reference [.on_main_a6]
+ !reference [.on_main]
needs:
- docker_build_agent6_windows1809_core
- docker_build_agent6_windows2022_core
@@ -96,42 +34,11 @@ dev_master-a6-windows:
IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
IMG_DESTINATIONS: agent-dev:master-py2-win-servercore
-dev_nightly-a7-windows:
- extends: .docker_publish_job_definition
- stage: dev_container_deploy
- rules:
- !reference [.on_deploy_nightly_repo_branch_a7]
- needs:
- - docker_build_agent7_windows1809
- - docker_build_agent7_windows1809_jmx
- - docker_build_agent7_windows1809_core
- - docker_build_agent7_windows1809_core_jmx
- - docker_build_agent7_windows2022
- - docker_build_agent7_windows2022_jmx
- - docker_build_agent7_windows2022_core
- - docker_build_agent7_windows2022_core_jmx
- variables:
- IMG_REGISTRIES: dev
- parallel:
- matrix:
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_SHORT_SHA}-py3-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64"
- IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_SHORT_SHA}-py3-jmx-win
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_SHORT_SHA}-py3-win-servercore
- - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx"
- IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64"
- IMG_DESTINATIONS: agent-dev:nightly-${CI_COMMIT_SHORT_SHA}-py3-jmx-win-servercore
-
dev_nightly-a6-windows:
extends: .docker_publish_job_definition
stage: dev_container_deploy
rules:
- !reference [.on_deploy_nightly_repo_branch_a6]
+ !reference [.on_deploy_nightly_repo_branch]
needs:
- docker_build_agent6_windows1809_core
- docker_build_agent6_windows2022_core
diff --git a/.gitlab/dev_container_deploy/include.yml b/.gitlab/dev_container_deploy/include.yml
index a1456b2b097d6..1ad939f24ae46 100644
--- a/.gitlab/dev_container_deploy/include.yml
+++ b/.gitlab/dev_container_deploy/include.yml
@@ -4,6 +4,6 @@
# (in the datadog/agent-dev | datadog/dogstatsd-dev Dockerhub repos).
include:
- - /.gitlab/dev_container_deploy/docker_linux.yml
- - /.gitlab/dev_container_deploy/docker_windows.yml
- - /.gitlab/dev_container_deploy/fakeintake.yml
+ - .gitlab/dev_container_deploy/docker_linux.yml
+ - .gitlab/dev_container_deploy/docker_windows.yml
+ - .gitlab/dev_container_deploy/fakeintake.yml
diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml
index 555a6614c6857..4e33d64c46cfd 100644
--- a/.gitlab/e2e/e2e.yml
+++ b/.gitlab/e2e/e2e.yml
@@ -24,7 +24,7 @@
.k8s_e2e_template_needs_dev:
extends: .k8s_e2e_template
needs:
- - dev_branch_multiarch-a7
+ - dev_branch_multiarch-a6
- dca_dev_branch
.k8s_e2e_template_dev:
@@ -41,7 +41,7 @@
.k8s_e2e_template_needs_main:
extends: .k8s_e2e_template
needs:
- - dev_master-a7
+ - dev_master-a6
- dca_dev_master
.k8s_e2e_template_main_with_cws_cspm_init:
@@ -117,6 +117,7 @@ k8s-e2e-otlp-main:
E2E_PIPELINE_ID: $CI_PIPELINE_ID
E2E_COMMIT_SHA: $CI_COMMIT_SHORT_SHA
E2E_OUTPUT_DIR: $CI_PROJECT_DIR/e2e-output
+ E2E_MAJOR_VERSION: 6
script:
- inv -e new-e2e-tests.run --targets $TARGETS -c ddagent:imagePullRegistry=669783387624.dkr.ecr.us-east-1.amazonaws.com -c ddagent:imagePullUsername=AWS -c ddagent:imagePullPassword=$(aws ecr get-login-password) --junit-tar "junit-${CI_JOB_NAME}.tgz" ${EXTRA_PARAMS}
after_script:
@@ -138,20 +139,19 @@ k8s-e2e-otlp-main:
.new_e2e_template_needs_deb_x64:
extends: .new_e2e_template
needs:
- - deploy_deb_testing-a7_x64
+ - deploy_deb_testing-a6_x64
.new_e2e_template_needs_deb_windows_x64:
extends: .new_e2e_template
needs:
- - deploy_deb_testing-a7_x64
- - deploy_windows_testing-a7
+ - deploy_deb_testing-a6_x64
+ - deploy_windows_testing-a6
.new_e2e_template_needs_container_deploy:
extends: .new_e2e_template
needs:
- qa_agent
- qa_dca
- - qa_dogstatsd
new-e2e-containers:
extends:
@@ -223,9 +223,9 @@ new-e2e-npm:
rules: !reference [.on_npm_or_e2e_changes_or_manual]
needs:
- qa_agent
- - deploy_deb_testing-a7_x64
- - deploy_rpm_testing-a7_x64
- - deploy_windows_testing-a7
+ - deploy_deb_testing-a6_x64
+ - deploy_rpm_testing-a6_x64
+ - deploy_windows_testing-a6
variables:
TARGETS: ./tests/npm
TEAM: network-performance-monitoring
@@ -241,7 +241,7 @@ new-e2e-cws:
extends: .new_e2e_template
rules: !reference [.on_cws_or_e2e_changes_or_manual]
needs:
- - deploy_deb_testing-a7_x64
+ - deploy_deb_testing-a6_x64
- qa_cws_instrumentation
- qa_agent
variables:
@@ -273,7 +273,7 @@ new-e2e-apm:
rules: !reference [.on_apm_or_e2e_changes_or_manual]
needs:
- qa_agent
- - deploy_deb_testing-a7_x64
+ - deploy_deb_testing-a6_x64
variables:
TARGETS: ./tests/apm
TEAM: apm-agent
@@ -284,16 +284,6 @@ new-e2e-apm:
- EXTRA_PARAMS: --run TestVMFakeintakeSuiteUDS
- EXTRA_PARAMS: --run TestVMFakeintakeSuiteTCP
-new-e2e-updater:
- extends: .new_e2e_template
- rules:
- !reference [.on_updater_or_e2e_changes_or_manual]
- needs:
- - deploy_deb_testing-u7_arm64
- variables:
- TARGETS: ./tests/updater
- TEAM: fleet
-
# ^ If you create a new job here that extends `.new_e2e_template`,
# /!\ do not forget to add it in the `dependencies` statement of the
# /___\ `e2e_test_junit_upload` job in the `.gitlab/e2e_test_junit_upload.yml` file
diff --git a/.gitlab/functional_test/common.yml b/.gitlab/functional_test/common.yml
index 13b9500c2ff03..ac5c652d75f8b 100644
--- a/.gitlab/functional_test/common.yml
+++ b/.gitlab/functional_test/common.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
.kitchen_test_system_probe:
extends:
@@ -13,7 +13,7 @@
!reference [.on_system_probe_or_e2e_changes_or_manual]
timeout: 3h
variables:
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
DD_PIPELINE_ID: $CI_PIPELINE_ID-fnct
CHEF_VERSION: 14.15.6
@@ -25,6 +25,6 @@
!reference [.manual]
stage: functional_test
variables:
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
DD_PIPELINE_ID: $CI_PIPELINE_ID-fnct
CHEF_VERSION: 14.15.6
diff --git a/.gitlab/functional_test/include.yml b/.gitlab/functional_test/include.yml
index daa2eb3299e0f..3901f4d0cb53c 100644
--- a/.gitlab/functional_test/include.yml
+++ b/.gitlab/functional_test/include.yml
@@ -3,13 +3,13 @@
# Contains jobs which run kitchen tests on the security-agent and on system-probe
include:
- - /.gitlab/functional_test/common.yml
- - /.gitlab/functional_test/security_agent.yml
- - /.gitlab/functional_test/serverless.yml
- - /.gitlab/functional_test/regression_detector.yml
- - /.gitlab/functional_test/workload_checks.yml
- - /.gitlab/functional_test/system_probe_windows.yml
- - /.gitlab/kernel_matrix_testing/common.yml
- - /.gitlab/kernel_matrix_testing/system_probe.yml
- - /.gitlab/kernel_matrix_testing/security_agent.yml
- - /.gitlab/functional_test_sysprobe/system_probe.yml
+ - .gitlab/functional_test/common.yml
+ - .gitlab/functional_test/security_agent.yml
+ - .gitlab/functional_test/serverless.yml
+ - .gitlab/functional_test/regression_detector.yml
+ - .gitlab/functional_test/workload_checks.yml
+ - .gitlab/functional_test/system_probe_windows.yml
+ - .gitlab/kernel_matrix_testing/common.yml
+ - .gitlab/kernel_matrix_testing/system_probe.yml
+ - .gitlab/kernel_matrix_testing/security_agent.yml
+ - .gitlab/functional_test_sysprobe/system_probe.yml
diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml
index 38d3609a2428c..3e6f8d36a1af6 100644
--- a/.gitlab/functional_test/regression_detector.yml
+++ b/.gitlab/functional_test/regression_detector.yml
@@ -6,7 +6,7 @@ single-machine-performance-regression_detector:
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/docker_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["runner:docker"]
needs:
- - job: single_machine_performance-amd64-a7
+ - job: single_machine_performance-amd64-a6
artifacts: false
artifacts:
expire_in: 1 weeks
@@ -58,15 +58,15 @@ single-machine-performance-regression_detector:
- BASELINE_SHA="${SMP_MERGE_BASE}"
- echo "Computing baseline..."
- echo "Checking if image exists for commit ${BASELINE_SHA}..."
- - while [[ ! $(aws ecr describe-images --profile single-machine-performance --registry-id "${SMP_ACCOUNT_ID}" --repository-name "${SMP_AGENT_TEAM_ID}-agent" --image-ids imageTag="${BASELINE_SHA}-7-amd64") ]]; do echo "No image exists for ${BASELINE_SHA} - checking predecessor of ${BASELINE_SHA} next"; BASELINE_SHA=$(git rev-parse ${BASELINE_SHA}^); echo "Checking if image exists for commit ${BASELINE_SHA}..."; done
+ - while [[ ! $(aws ecr describe-images --profile single-machine-performance --registry-id "${SMP_ACCOUNT_ID}" --repository-name "${SMP_AGENT_TEAM_ID}-agent" --image-ids imageTag="${BASELINE_SHA}-6-amd64") ]]; do echo "No image exists for ${BASELINE_SHA} - checking predecessor of ${BASELINE_SHA} next"; BASELINE_SHA=$(git rev-parse ${BASELINE_SHA}^); echo "Checking if image exists for commit ${BASELINE_SHA}..."; done
- echo "Image exists for commit ${BASELINE_SHA}"
- echo "Baseline SHA is ${BASELINE_SHA}"
- echo -n "${BASELINE_SHA}" > "${CI_COMMIT_SHA}-baseline_sha"
# Copy the baseline SHA to SMP for debugging purposes later
- aws s3 cp --profile single-machine-performance --only-show-errors "${CI_COMMIT_SHA}-baseline_sha" "s3://${SMP_AGENT_TEAM_ID}-smp-artifacts/information/"
- - BASELINE_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:${BASELINE_SHA}-7-amd64
+ - BASELINE_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:${BASELINE_SHA}-6-amd64
- echo "${BASELINE_SHA} | ${BASELINE_IMAGE}"
- - COMPARISON_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:${CI_COMMIT_SHA}-7-amd64
+ - COMPARISON_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:${CI_COMMIT_SHA}-6-amd64
- echo "${CI_COMMIT_SHA} | ${COMPARISON_IMAGE}"
- RUST_LOG="info,aws_config::profile::credentials=error"
- RUST_LOG_DEBUG="debug,aws_config::profile::credentials=error"
diff --git a/.gitlab/functional_test/security_agent.yml b/.gitlab/functional_test/security_agent.yml
index 93d9d725e5777..6baf6aa2a7dcf 100644
--- a/.gitlab/functional_test/security_agent.yml
+++ b/.gitlab/functional_test/security_agent.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
# Expect warning: github.com/DataDog/datadog-agent/pkg/config.LoadCustom:1501 Unknown environment variable: DD_SYSTEM_PROBE_BPF_DIR
.kitchen_test_security_agent_linux:
@@ -93,6 +93,7 @@ kitchen_test_security_agent_arm64:
KITCHEN_CWS_PLATFORM: [host, docker, ad, ebpfless]
kitchen_test_security_agent_amazonlinux_x64:
+ allow_failure: true
extends:
- .kitchen_test_security_agent_linux
- .kitchen_ec2_location_us_east_1
@@ -144,7 +145,6 @@ kitchen_test_security_agent_x64_ec2:
kitchen_test_security_agent_amazonlinux_x64_fentry:
extends:
- kitchen_test_security_agent_amazonlinux_x64
- allow_failure: true
parallel:
matrix:
- KITCHEN_PLATFORM: "amazonlinux"
@@ -162,7 +162,7 @@ kitchen_stress_security_agent:
stage: functional_test
needs: ["tests_ebpf_x64", "prepare_ebpf_functional_tests_x64"]
variables:
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
DD_PIPELINE_ID: $CI_PIPELINE_ID-fnct
before_script:
- pushd $DD_AGENT_TESTING_DIR
@@ -189,9 +189,9 @@ kitchen_test_security_agent_windows_x64:
KITCHEN_OSVERS: "win2016"
CHEF_VERSION: 14.12.9 # newer versions error out during kitchen setup of azure VM
before_script:
- - export WINDOWS_DDPROCMON_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_DRIVER")
- - export WINDOWS_DDPROCMON_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_VERSION")
- - export WINDOWS_DDPROCMON_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_SHASUM")
+ - export WINDOWS_DDPROCMON_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_DRIVER")
+ - export WINDOWS_DDPROCMON_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_VERSION")
+ - export WINDOWS_DDPROCMON_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_SHASUM")
- pushd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
script:
diff --git a/.gitlab/functional_test/system_probe_windows.yml b/.gitlab/functional_test/system_probe_windows.yml
index 9a8fd3ad5ae18..5882aa196a977 100644
--- a/.gitlab/functional_test/system_probe_windows.yml
+++ b/.gitlab/functional_test/system_probe_windows.yml
@@ -3,12 +3,12 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
-# - /.gitlab/functional_test/common.yml
+# - .gitlab/kitchen_testing/testing.yml
+# - .gitlab/functional_test/common.yml
kitchen_test_system_probe_windows_x64:
extends:
- - .kitchen_agent_a7
+ - .kitchen_agent_a6
- .kitchen_os_windows
- .kitchen_test_system_probe
- .kitchen_azure_x64
@@ -20,9 +20,9 @@ kitchen_test_system_probe_windows_x64:
KITCHEN_OSVERS: "win2016"
CHEF_VERSION: 14.12.9 # newer versions error out during kitchen setup of azure VM
before_script:
- - export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_DRIVER")
- - export WINDOWS_DDNPM_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_VERSION")
- - export WINDOWS_DDNPM_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDNPM_SHASUM")
+ - export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDNPM_DRIVER")
+ - export WINDOWS_DDNPM_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDNPM_VERSION")
+ - export WINDOWS_DDNPM_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDNPM_SHASUM")
- pushd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
script:
diff --git a/.gitlab/functional_test/workload_checks.yml b/.gitlab/functional_test/workload_checks.yml
index 43e8116f1f9d2..e54034f2b01d9 100644
--- a/.gitlab/functional_test/workload_checks.yml
+++ b/.gitlab/functional_test/workload_checks.yml
@@ -4,7 +4,7 @@ single-machine-performance-workload-checks:
tags: ["runner:docker"]
rules: !reference [.on_scheduled_main]
needs:
- - job: single_machine_performance-nightly-amd64-a7
+ - job: single_machine_performance-nightly-amd64-a6
artifacts: false
artifacts:
expire_in: 1 weeks
@@ -33,7 +33,7 @@ single-machine-performance-workload-checks:
- aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp
- chmod +x smp
- CURRENT_DATE=$(date --utc '+%Y_%m_%d')
- - TARGET_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:nightly-${CI_COMMIT_BRANCH}-${CI_COMMIT_SHA}-7-amd64
+ - TARGET_IMAGE=${SMP_ECR_URL}/${SMP_AGENT_TEAM_ID}-agent:nightly-${CI_COMMIT_BRANCH}-${CI_COMMIT_SHA}-6-amd64
# Copy the TARGET_IMAGE to SMP for debugging purposes later
- RUST_LOG="info,aws_config::profile::credentials=error"
- RUST_LOG_DEBUG="debug,aws_config::profile::credentials=error"
diff --git a/.gitlab/functional_test_cleanup/functional_test_cleanup.yml b/.gitlab/functional_test_cleanup/functional_test_cleanup.yml
index 7d2d4bbd866c5..8e73c0b1bdca1 100644
--- a/.gitlab/functional_test_cleanup/functional_test_cleanup.yml
+++ b/.gitlab/functional_test_cleanup/functional_test_cleanup.yml
@@ -6,7 +6,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_common/cleanup.yml
+# - .gitlab/kitchen_common/cleanup.yml
cleanup_kitchen_functional_test:
extends: .kitchen_cleanup_azure_common
diff --git a/.gitlab/functional_test_sysprobe/system_probe.yml b/.gitlab/functional_test_sysprobe/system_probe.yml
index 5276288d6a336..7050cf5e874b0 100644
--- a/.gitlab/functional_test_sysprobe/system_probe.yml
+++ b/.gitlab/functional_test_sysprobe/system_probe.yml
@@ -3,8 +3,8 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
-# - /.gitlab/functional_test/common.yml
+# - .gitlab/kitchen_testing/testing.yml
+# - .gitlab/functional_test/common.yml
.kitchen_test_system_probe_linux:
extends:
diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml
index 2d440b510c259..2ab56b89bf6f9 100644
--- a/.gitlab/install_script_testing/install_script_testing.yml
+++ b/.gitlab/install_script_testing/install_script_testing.yml
@@ -14,8 +14,8 @@ test_install_script:
--variable TESTING_APT_URL
--variable TESTING_YUM_URL
--variable TEST_PIPELINE_ID"
- needs: ["deploy_deb_testing-a6_x64", "deploy_rpm_testing-a6_x64", "deploy_suse_rpm_testing_x64-a6", "deploy_deb_testing-a7_x64", "deploy_rpm_testing-a7_x64", "deploy_suse_rpm_testing_x64-a7"]
+ needs: ["deploy_deb_testing-a6_x64", "deploy_rpm_testing-a6_x64", "deploy_suse_rpm_testing_x64-a6"]
rules:
- - !reference [.except_no_a6_or_no_a7]
+ - !reference [.except_mergequeue]
- !reference [.on_deploy]
diff --git a/.gitlab/integration_test/dogstatsd.yml b/.gitlab/integration_test/dogstatsd.yml
deleted file mode 100644
index 092d8f4859359..0000000000000
--- a/.gitlab/integration_test/dogstatsd.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# integration_test stage
-# Contains jobs to run integration tests in dogstatsd go binaries
-
-dogstatsd_x64_size_test:
- stage: integration_test
- rules:
- !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["build_dogstatsd_static-binary_x64"]
- before_script:
- - source /root/.bashrc
- - mkdir -p $STATIC_BINARIES_DIR
- - $S3_CP_CMD $S3_ARTIFACTS_URI/static/dogstatsd.amd64 $STATIC_BINARIES_DIR/dogstatsd
- script:
- - inv -e dogstatsd.size-test --skip-build
diff --git a/.gitlab/integration_test/include.yml b/.gitlab/integration_test/include.yml
index 3d195fbf448d7..a63322943bae0 100644
--- a/.gitlab/integration_test/include.yml
+++ b/.gitlab/integration_test/include.yml
@@ -3,5 +3,4 @@
# Contains jobs to run integration tests in go binaries
include:
- - /.gitlab/integration_test/dogstatsd.yml
- - /.gitlab/integration_test/windows.yml
+ - .gitlab/integration_test/windows.yml
diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml
index ce135eb19ee5d..da825ede34ae3 100644
--- a/.gitlab/internal_image_deploy/internal_image_deploy.yml
+++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml
@@ -4,11 +4,11 @@
docker_trigger_internal:
stage: internal_image_deploy
- rules: !reference [.on_deploy_a7_internal_or_manual]
+ rules: !reference [.on_deploy_internal_or_manual]
needs:
- - job: docker_build_agent7_jmx
+ - job: docker_build_agent6_jmx
artifacts: false
- - job: docker_build_agent7_jmx_arm64
+ - job: docker_build_agent6_jmx_arm64
artifacts: false
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -18,7 +18,7 @@ docker_trigger_internal:
IMAGE_NAME: datadog-agent
RELEASE_TAG: ${CI_COMMIT_REF_SLUG}-jmx
BUILD_TAG: ${CI_COMMIT_REF_SLUG}-jmx
- TMPL_SRC_IMAGE: v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx
+ TMPL_SRC_IMAGE: v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-6-jmx
TMPL_SRC_REPO: ci/datadog-agent/agent
RELEASE_STAGING: "true"
script:
@@ -50,7 +50,7 @@ docker_trigger_internal:
docker_trigger_cluster_agent_internal:
stage: internal_image_deploy
- rules: !reference [.on_deploy_a7]
+ rules: !reference [.on_deploy]
needs:
- job: docker_build_cluster_agent_amd64
artifacts: false
@@ -97,7 +97,7 @@ docker_trigger_cluster_agent_internal:
docker_trigger_cws_instrumentation_internal:
stage: internal_image_deploy
- rules: !reference [.on_deploy_a7]
+ rules: !reference [.on_deploy]
needs:
- job: docker_build_cws_instrumentation_amd64
artifacts: false
diff --git a/.gitlab/internal_kubernetes_deploy/include.yml b/.gitlab/internal_kubernetes_deploy/include.yml
index 68f5048e62ea1..4161ee65e6f04 100644
--- a/.gitlab/internal_kubernetes_deploy/include.yml
+++ b/.gitlab/internal_kubernetes_deploy/include.yml
@@ -3,5 +3,5 @@
# Contains jobs to trigger a pipeline in our k8s-datadog-agent-ops repo
include:
- - /.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
- - /.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml
\ No newline at end of file
+ - .gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
+ - .gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml
\ No newline at end of file
diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
index 014baf6689429..ee595d33bd81d 100644
--- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
+++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml
@@ -14,7 +14,7 @@ internal_kubernetes_deploy_experimental:
when: never
- if: $DDR != "true"
when: never
- - !reference [.on_deploy_a7]
+ - !reference [.on_deploy]
needs:
- job: docker_trigger_internal
artifacts: false
@@ -56,7 +56,7 @@ notify-slack:
when: never
- if: $DDR != "true"
when: never
- - !reference [.on_deploy_a7]
+ - !reference [.on_deploy]
tags: ["arch:amd64"]
needs: ["internal_kubernetes_deploy_experimental"]
script:
diff --git a/.gitlab/kitchen_cleanup/cleanup.yml b/.gitlab/kitchen_cleanup/cleanup.yml
index 9422f277b1bff..f4241d80062f0 100644
--- a/.gitlab/kitchen_cleanup/cleanup.yml
+++ b/.gitlab/kitchen_cleanup/cleanup.yml
@@ -8,7 +8,7 @@
- aws s3 rm s3://$DEB_TESTING_S3_BUCKET/dists/pipeline-$DD_PIPELINE_ID --recursive
- aws s3 rm s3://$RPM_TESTING_S3_BUCKET/testing/pipeline-$DD_PIPELINE_ID --recursive
- aws s3 rm s3://$RPM_TESTING_S3_BUCKET/testing/suse/pipeline-$DD_PIPELINE_ID --recursive
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
+ - export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
- aws s3 rm s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET --recursive
- cd $OMNIBUS_PACKAGE_DIR
# Remove all deb packages for the pipeline in the pool
diff --git a/.gitlab/kitchen_cleanup/include.yml b/.gitlab/kitchen_cleanup/include.yml
index 1c2ed673e6d3f..f0a86b5fcf296 100644
--- a/.gitlab/kitchen_cleanup/include.yml
+++ b/.gitlab/kitchen_cleanup/include.yml
@@ -2,5 +2,5 @@
# kitchen_cleanup stage
# Include file for jobs which clean up kitchen resources created for Agent kitchen tests.
include:
- - /.gitlab/kitchen_cleanup/cleanup.yml
- - /.gitlab/kitchen_cleanup/kitchen_cleanup.yml
\ No newline at end of file
+ - .gitlab/kitchen_cleanup/cleanup.yml
+ - .gitlab/kitchen_cleanup/kitchen_cleanup.yml
\ No newline at end of file
diff --git a/.gitlab/kitchen_cleanup/kitchen_cleanup.yml b/.gitlab/kitchen_cleanup/kitchen_cleanup.yml
index e83a4729492aa..3cf99771bdfcc 100644
--- a/.gitlab/kitchen_cleanup/kitchen_cleanup.yml
+++ b/.gitlab/kitchen_cleanup/kitchen_cleanup.yml
@@ -6,18 +6,11 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_common/cleanup.yml
+# - .gitlab/kitchen_common/cleanup.yml
kitchen_cleanup_azure-a6:
extends: .kitchen_cleanup_azure_common
rules:
- !reference [.on_kitchen_tests_a6_always]
+ !reference [.on_kitchen_tests_always]
variables:
DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
-
-kitchen_cleanup_azure-a7:
- extends: .kitchen_cleanup_azure_common
- rules:
- !reference [.on_default_kitchen_tests_a7_always]
- variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml
index 40cc8f00fa107..0d38070fcf825 100644
--- a/.gitlab/kitchen_deploy/kitchen_deploy.yml
+++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml
@@ -15,7 +15,7 @@
.setup_signing_keys_package:
&setup_signing_keys_package # Set up prod apt repo to get the datadog-signing-keys package
- - echo 'deb [signed-by=/usr/share/keyrings/datadog-archive-keyring.gpg] https://apt.datadoghq.com/ stable 7' > /etc/apt/sources.list.d/datadog.list
+ - echo 'deb [signed-by=/usr/share/keyrings/datadog-archive-keyring.gpg] https://apt.datadoghq.com/ stable 6' > /etc/apt/sources.list.d/datadog.list
- touch /usr/share/keyrings/datadog-archive-keyring.gpg
- chmod a+r /usr/share/keyrings/datadog-archive-keyring.gpg
- curl https://keys.datadoghq.com/DATADOG_APT_KEY_CURRENT.public | gpg --no-default-keyring --keyring /usr/share/keyrings/datadog-archive-keyring.gpg --import --batch
@@ -37,12 +37,6 @@
.deploy_deb_resource_group-a6: &deploy_deb_resource_group-a6
resource_group: deploy_deb_a6
-.deploy_deb_resource_group-a7: &deploy_deb_resource_group-a7
- resource_group: deploy_deb_a7
-
-.deploy_deb_resource_group-u7: &deploy_deb_resource_group-u7
- resource_group: deploy_deb_u7
-
.deploy_deb_testing-a6:
stage: kitchen_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
@@ -54,19 +48,11 @@
- source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR
-.deploy_deb_testing-u7:
- stage: kitchen_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- <<: *deploy_deb_resource_group-u7
- variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-u7
- before_script:
- - source /root/.bashrc
- - ls $OMNIBUS_PACKAGE_DIR
-
deploy_deb_testing-a6_x64:
- rules: !reference [.on_kitchen_tests_a6]
+ rules:
+ - !reference [.except_no_tests_no_deploy]
+ - !reference [.except_mergequeue]
+ - when: on_success
extends:
- .deploy_deb_testing-a6
needs:
@@ -87,7 +73,10 @@ deploy_deb_testing-a6_x64:
- echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-x86_64" -m 6 -b $DEB_TESTING_S3_BUCKET -a x86_64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
deploy_deb_testing-a6_arm64:
- rules: !reference [.on_all_kitchen_builds_a6]
+ rules:
+ - !reference [.on_all_install_script_tests]
+ - !reference [.on_installer_or_e2e_changes]
+ - !reference [.manual]
extends:
- .deploy_deb_testing-a6
needs: ["agent_deb-arm64-a6"]
@@ -100,72 +89,6 @@ deploy_deb_testing-a6_arm64:
- echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 6 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-*_6*arm64.deb
- echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 6 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
-.deploy_deb_testing-a7:
- stage: kitchen_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- <<: *deploy_deb_resource_group-a7
- variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
- before_script:
- - source /root/.bashrc
- - ls $OMNIBUS_PACKAGE_DIR
-
-deploy_deb_testing-a7_x64:
- rules:
- - !reference [.except_no_tests_no_deploy]
- - !reference [.on_a7]
- extends:
- - .deploy_deb_testing-a7
- needs:
- [
- "agent_deb-x64-a7",
- "agent_heroku_deb-x64-a7",
- "iot_agent_deb-x64",
- "dogstatsd_deb-x64",
- "lint_linux-x64",
- ]
- script:
- - *setup_apt_signing_key
- - set +x # make sure we don't output the creds to the build log
-
- - *setup_signing_keys_package
-
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-x86_64" -m 7 -b $DEB_TESTING_S3_BUCKET -a amd64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-*_7*amd64.deb
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-x86_64" -m 7 -b $DEB_TESTING_S3_BUCKET -a x86_64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-*_7*amd64.deb
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-x86_64" -m 7 -b $DEB_TESTING_S3_BUCKET -a amd64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-x86_64" -m 7 -b $DEB_TESTING_S3_BUCKET -a x86_64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
-
-deploy_deb_testing-a7_arm64:
- rules: !reference [.on_all_kitchen_builds_a7]
- extends:
- - .deploy_deb_testing-a7
- needs: ["agent_deb-arm64-a7", "lint_linux-arm64"]
- script:
- - *setup_apt_signing_key
- - set +x # make sure we don't output the creds to the build log
-
- - *setup_signing_keys_package
-
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 7 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-*_7*arm64.deb
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 7 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
-
-deploy_deb_testing-u7_arm64:
- rules:
- - !reference [.except_no_tests_no_deploy]
- - !reference [.on_a7]
- extends:
- - .deploy_deb_testing-u7
- needs: ["updater_deb-arm64", "lint_linux-arm64"]
- script:
- - *setup_apt_signing_key
- - set +x # make sure we don't output the creds to the build log
-
- - *setup_signing_keys_package
-
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 7 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-updater*arm64.deb
- - echo "$APT_SIGNING_KEY_PASSPHRASE" | deb-s3 upload -c "pipeline-$DD_PIPELINE_ID-arm64" -m 7 -b $DEB_TESTING_S3_BUCKET -a arm64 --sign=$DEB_GPG_KEY_ID --gpg_options="--passphrase-fd 0 --batch --digest-algo SHA512" --preserve_versions --visibility public $OMNIBUS_PACKAGE_DIR/datadog-signing-keys_${DD_PIPELINE_ID}.deb
-
.deploy_rpm_testing-a6:
stage: kitchen_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
@@ -177,7 +100,10 @@ deploy_deb_testing-u7_arm64:
- ls $OMNIBUS_PACKAGE_DIR
deploy_rpm_testing-a6_x64:
- rules: !reference [.on_kitchen_tests_a6]
+ rules:
+ - !reference [.except_no_tests_no_deploy]
+ - !reference [.except_mergequeue]
+ - when: on_success
extends:
- .deploy_rpm_testing-a6
needs:
@@ -191,7 +117,10 @@ deploy_rpm_testing-a6_x64:
- echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "testing/pipeline-$DD_PIPELINE_ID/6/x86_64/" -a "x86_64" --sign --metadata-signing-key $RPM_GPG_KEY_ID $OMNIBUS_PACKAGE_DIR/datadog-*-6.*x86_64.rpm
deploy_rpm_testing-a6_arm64:
- rules: !reference [.on_all_kitchen_builds_a6]
+ rules:
+ - !reference [.on_all_install_script_tests]
+ - !reference [.on_installer_or_e2e_changes]
+ - !reference [.manual]
extends:
- .deploy_rpm_testing-a6
needs:
@@ -204,46 +133,11 @@ deploy_rpm_testing-a6_arm64:
- set +x
- echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "testing/pipeline-$DD_PIPELINE_ID/6/aarch64/" -a "aarch64" --sign --metadata-signing-key $RPM_GPG_KEY_ID $OMNIBUS_PACKAGE_DIR/datadog-*-6.*aarch64.rpm
-.deploy_rpm_testing-a7:
- stage: kitchen_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
- before_script:
- - source /root/.bashrc
- - ls $OMNIBUS_PACKAGE_DIR
-
-deploy_rpm_testing-a7_x64:
+deploy_suse_rpm_testing_x64-a6:
rules:
- !reference [.except_no_tests_no_deploy]
- - !reference [.on_a7]
- extends:
- - .deploy_rpm_testing-a7
- needs:
- [
- "agent_rpm-x64-a7",
- "iot_agent_rpm-x64",
- "dogstatsd_rpm-x64",
- "lint_linux-x64",
- ]
- script:
- - *setup_rpm_signing_key
- - set +x
- - echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "testing/pipeline-$DD_PIPELINE_ID/7/x86_64/" -a "x86_64" --sign --metadata-signing-key $RPM_GPG_KEY_ID $OMNIBUS_PACKAGE_DIR/datadog-*-7.*x86_64.rpm
-
-deploy_rpm_testing-a7_arm64:
- rules: !reference [.on_all_kitchen_builds_a7]
- extends:
- - .deploy_rpm_testing-a7
- needs: ["agent_rpm-arm64-a7", "lint_linux-arm64"]
- script:
- - *setup_rpm_signing_key
- - set +x
- - echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "testing/pipeline-$DD_PIPELINE_ID/7/aarch64/" -a "aarch64" --sign --metadata-signing-key $RPM_GPG_KEY_ID $OMNIBUS_PACKAGE_DIR/datadog-*-7.*aarch64.rpm
-
-deploy_suse_rpm_testing_x64-a6:
- rules: !reference [.on_kitchen_tests_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: kitchen_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
@@ -262,50 +156,30 @@ deploy_suse_rpm_testing_x64-a6:
- set +x
- echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "suse/testing/pipeline-$DD_PIPELINE_ID/6/x86_64/" -a "x86_64" --sign --metadata-signing-key $RPM_GPG_KEY_ID --repodata-store-public-key $OMNIBUS_PACKAGE_DIR_SUSE/datadog-*-6.*x86_64.rpm
-deploy_suse_rpm_testing_x64-a7:
+deploy_suse_rpm_testing_arm64-a6:
rules:
- - !reference [.except_no_tests_no_deploy]
- - !reference [.on_a7]
- stage: kitchen_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs:
- [
- "agent_suse-x64-a7",
- "iot_agent_suse-x64",
- "dogstatsd_suse-x64",
- "lint_linux-x64",
- ]
- variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
- before_script:
- - source /root/.bashrc
- - ls $OMNIBUS_PACKAGE_DIR_SUSE
- script:
- - *setup_rpm_signing_key
- - set +x
- - echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "suse/testing/pipeline-$DD_PIPELINE_ID/7/x86_64/" -a "x86_64" --sign --metadata-signing-key $RPM_GPG_KEY_ID --repodata-store-public-key $OMNIBUS_PACKAGE_DIR_SUSE/datadog-*-7.*x86_64.rpm
-
-deploy_suse_rpm_testing_arm64-a7:
- rules: !reference [.on_kitchen_tests_a7]
+ - !reference [.on_kitchen_tests]
+ - !reference [.on_installer_or_e2e_changes]
+ - !reference [.manual]
stage: kitchen_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
- needs: ["agent_suse-arm64-a7", "lint_linux-arm64"]
+ needs: ["agent_suse-arm64-a6", "lint_linux-arm64"]
variables:
- DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
+ DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
before_script:
- source /root/.bashrc
- ls $OMNIBUS_PACKAGE_DIR_SUSE
script:
- *setup_rpm_signing_key
- set +x
- - echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "suse/testing/pipeline-$DD_PIPELINE_ID/7/aarch64/" -a "aarch64" --sign --metadata-signing-key $RPM_GPG_KEY_ID --repodata-store-public-key $OMNIBUS_PACKAGE_DIR_SUSE/datadog-*-7.*aarch64.rpm
+ - echo "$RPM_SIGNING_PASSPHRASE" | rpm-s3 --verbose --visibility public-read -c "https://s3.amazonaws.com" -b $RPM_TESTING_S3_BUCKET -p "suse/testing/pipeline-$DD_PIPELINE_ID/6/aarch64/" -a "aarch64" --sign --metadata-signing-key $RPM_GPG_KEY_ID --repodata-store-public-key $OMNIBUS_PACKAGE_DIR_SUSE/datadog-*-6.*aarch64.rpm
deploy_windows_testing-a6:
rules:
- - !reference [.on_kitchen_tests_a6]
- - !reference [.on_windows_installer_changes_or_manual]
+ - !reference [.except_no_tests_no_deploy]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: kitchen_deploy
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
tags: ["arch:amd64"]
@@ -315,19 +189,3 @@ deploy_windows_testing-a6:
- ls $OMNIBUS_PACKAGE_DIR
script:
- $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-6.*.msi" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A6 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
-
-deploy_windows_testing-a7:
- rules:
- - !reference [.except_no_tests_no_deploy]
- - !reference [.on_a7]
- - !reference [.on_windows_installer_changes_or_manual]
- stage: kitchen_deploy
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-builders/gitlab_agent_deploy:$DATADOG_AGENT_BUILDERS
- tags: ["arch:amd64"]
- needs:
- ["lint_windows-x64", "windows_msi_and_bosh_zip_x64-a7"]
- before_script:
- - source /root/.bashrc
- - ls $OMNIBUS_PACKAGE_DIR
- script:
- - $S3_CP_CMD --recursive --exclude "*" --include "datadog-agent-7.*.msi" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A7 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732
diff --git a/.gitlab/kitchen_testing/centos.yml b/.gitlab/kitchen_testing/centos.yml
index 87c64517e31e8..a85ed566ded9e 100644
--- a/.gitlab/kitchen_testing/centos.yml
+++ b/.gitlab/kitchen_testing/centos.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
# Kitchen: OSes
# -------------
@@ -22,20 +22,20 @@
# Kitchen: scenarios (os * agent * (cloud + arch))
# -------------------------------
-.kitchen_scenario_centos_no_support_for_fips_a7:
+.kitchen_scenario_centos_no_support_for_fips_a6:
extends:
- - .kitchen_agent_a7
+ - .kitchen_agent_a6
- .kitchen_os_centos_no_support_for_fips
- .kitchen_azure_x64
- needs: ["deploy_rpm_testing-a7_x64"]
+ needs: ["deploy_rpm_testing-a6_x64"]
# Kitchen: final test matrix (tests * scenarios)
# ----------------------------------------------
-kitchen_centos_process_agent-a7:
+kitchen_centos_process_agent-a6:
variables:
KITCHEN_OSVERS: "rhel-81"
DEFAULT_KITCHEN_OSVERS: "rhel-81"
extends:
- - .kitchen_scenario_centos_no_support_for_fips_a7
+ - .kitchen_scenario_centos_no_support_for_fips_a6
- .kitchen_test_process_agent
diff --git a/.gitlab/kitchen_testing/debian.yml b/.gitlab/kitchen_testing/debian.yml
index fc1922957969d..75d080d421362 100644
--- a/.gitlab/kitchen_testing/debian.yml
+++ b/.gitlab/kitchen_testing/debian.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
# Kitchen: OSes
# -------------
@@ -18,26 +18,26 @@
# Kitchen: scenarios (os * agent * (cloud + arch))
# -------------------------------
-.kitchen_scenario_debian_a7_x64:
+.kitchen_scenario_debian_a6_x64:
variables:
KITCHEN_OSVERS: "debian-9,debian-10,debian-11,debian-12"
KITCHEN_CWS_SUPPORTED_OSVERS: "debian-10,debian-11"
DEFAULT_KITCHEN_OSVERS: "debian-11"
extends:
- - .kitchen_agent_a7
+ - .kitchen_agent_a6
- .kitchen_os_debian
- .kitchen_azure_x64
- needs: ["deploy_deb_testing-a7_x64"]
+ needs: ["deploy_deb_testing-a6_x64"]
# We only want to run step-by-step tests on deploy pipelines,
# which is why they have a different rule (if_deploy_6/7)
-kitchen_debian_process_agent-a7:
+kitchen_debian_process_agent-a6:
rules:
- - !reference [.on_default_kitchen_tests_a7]
+ - !reference [.on_default_kitchen_tests]
variables:
KITCHEN_OSVERS: "debian-11"
DEFAULT_KITCHEN_OSVERS: "debian-11"
extends:
- - .kitchen_scenario_debian_a7_x64
+ - .kitchen_scenario_debian_a6_x64
- .kitchen_test_process_agent
diff --git a/.gitlab/kitchen_testing/include.yml b/.gitlab/kitchen_testing/include.yml
index 21b93409b4ae3..62b7e577a6c9c 100644
--- a/.gitlab/kitchen_testing/include.yml
+++ b/.gitlab/kitchen_testing/include.yml
@@ -3,11 +3,11 @@
# Contains jobs which run kitchen tests on the Agent packages.
include:
- - /.gitlab/kitchen_testing/centos.yml
- - /.gitlab/kitchen_testing/debian.yml
- - /.gitlab/kitchen_testing/new-e2e_testing.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/include.yml
- - /.gitlab/kitchen_testing/suse.yml
- - /.gitlab/kitchen_testing/testing.yml
- - /.gitlab/kitchen_testing/ubuntu.yml
- - /.gitlab/kitchen_testing/windows.yml
+ - .gitlab/kitchen_testing/centos.yml
+ - .gitlab/kitchen_testing/debian.yml
+ - .gitlab/kitchen_testing/new-e2e_testing.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/include.yml
+ - .gitlab/kitchen_testing/suse.yml
+ - .gitlab/kitchen_testing/testing.yml
+ - .gitlab/kitchen_testing/ubuntu.yml
+ - .gitlab/kitchen_testing/windows.yml
diff --git a/.gitlab/kitchen_testing/new-e2e_testing.yml b/.gitlab/kitchen_testing/new-e2e_testing.yml
index 363010f664fb4..59df6467385ce 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing.yml
@@ -1,15 +1,9 @@
.new-e2e_agent_a6:
- rules: !reference [.on_kitchen_tests_a6] #TODO: Change when migration is complete to another name without 'kitchen'
+ rules: !reference [.on_kitchen_tests] #TODO: Change when migration is complete to another name without 'kitchen'
variables:
AGENT_MAJOR_VERSION: 6
retry: 1
-.new-e2e_agent_a7:
- rules: !reference [.on_kitchen_tests_a7] #TODO: Change when migration is complete to another name without 'kitchen'
- variables:
- AGENT_MAJOR_VERSION: 7
- retry: 1
-
.new-e2e_install_script:
variables:
TARGETS: ./tests/agent-platform/install-script
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml b/.gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml
index 07e3b62c44445..f2ce8bb937d27 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml
@@ -18,22 +18,6 @@
E2E_BRANCH_OSVERS: "amazonlinux2023"
needs: ["deploy_rpm_testing-a6_arm64"]
-.new-e2e_amazonlinux_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "amazonlinux2-5-10,amazonlinux2022-5-15,amazonlinux2023"
- E2E_CWS_SUPPORTED_OSVERS: "amazonlinux2-5-10,amazonlinux2022-5-15,amazonlinux2023"
- E2E_BRANCH_OSVERS: "amazonlinux2023"
- needs: ["deploy_rpm_testing-a7_x64"]
-
-.new-e2e_amazonlinux_a7_arm64:
- variables:
- E2E_ARCH: arm64
- E2E_OSVERS: "amazonlinux2-5-10,amazonlinux2022-5-15,amazonlinux2023"
- E2E_CWS_SUPPORTED_OSVERS: "amazonlinux2-5-10,amazonlinux2022-5-15,amazonlinux2023"
- E2E_BRANCH_OSVERS: "amazonlinux2023"
- needs: ["deploy_rpm_testing-a7_arm64"]
-
new-e2e-agent-platform-install-script-amazonlinux-a6-x86_64:
stage: kitchen_testing
extends:
@@ -56,37 +40,13 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-arm64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-amazonlinux-a7-x64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-amazonlinux-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_arm64
- - .new-e2e_agent_a7
- rules: !reference [.on_all_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
new-e2e-agent-platform-package-signing-amazonlinux-a6-x86_64:
stage: kitchen_testing
extends:
- .new_e2e_template
- - .new-e2e_amazonlinux_a7_x86_64
+ - .new-e2e_amazonlinux_a6_x86_64
- .new-e2e_package_signing
- rules: !reference [.on_default_new-e2e_tests_a7]
+ rules: !reference [.on_default_new-e2e_tests]
new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64:
stage: kitchen_testing
@@ -97,7 +57,7 @@ new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64:
- .new-e2e_amazonlinux_a6_x86_64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -110,44 +70,7 @@ new-e2e-agent-platform-step-by-step-amazonlinux-a6-arm64:
- .new-e2e_amazonlinux_a6_arm64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-amazonlinux-a7-x64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_x86_64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_deploy_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-amazonlinux-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_arm64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_deploy_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-amazonlinux-x64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_x86_64
- - .new-e2e_agent_a7
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -161,18 +84,3 @@ new-e2e-agent-platform-install-script-upgrade6-amazonlinux-x64:
- .new-e2e_agent_a6
variables:
FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-amazonlinux-iot-agent-x64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_amazonlinux
- - .new-e2e_amazonlinux_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
- END_MAJOR_VERSION: [7]
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/centos.yml b/.gitlab/kitchen_testing/new-e2e_testing/centos.yml
index 60be5debdf8ca..5c4fda025cd25 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/centos.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/centos.yml
@@ -10,22 +10,6 @@
E2E_BRANCH_OSVERS: "centos-79"
needs: ["deploy_rpm_testing-a6_x64"]
-.new-e2e_centos_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "centos-79,rhel-86"
- E2E_CWS_SUPPORTED_OSVERS: "centos-79,rhel-86"
- E2E_BRANCH_OSVERS: "centos-79"
- needs: ["deploy_rpm_testing-a7_x64"]
-
-.new-e2e_centos-fips_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "rhel-86-fips"
- E2E_CWS_SUPPORTED_OSVERS: "rhel-86-fips"
- E2E_BRANCH_OSVERS: "rhel-86-fips"
- needs: ["deploy_rpm_testing-a7_x64"]
-
.new-e2e_centos-fips_a6_x86_64:
variables:
E2E_ARCH: x86_64
@@ -34,14 +18,6 @@
E2E_BRANCH_OSVERS: "rhel-86-fips"
needs: ["deploy_rpm_testing-a6_x64"]
-.new-e2e_centos6_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "centos-610"
- E2E_BRANCH_OSVERS: "centos-610"
- E2E_OVERRIDE_INSTANCE_TYPE: "t2.medium" # CentOS 6 does not support ENA, so we cannot use t3 instances
- needs: ["deploy_rpm_testing-a7_x64"]
-
new-e2e-agent-platform-install-script-centos-a6-x86_64:
stage: kitchen_testing
extends:
@@ -53,87 +29,6 @@ new-e2e-agent-platform-install-script-centos-a6-x86_64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-centos-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-centos-iot-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-iot-agent
-
-new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-dogstatsd
-
-new-e2e-agent-platform-install-script-centos-fips-a6-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a6_x86_64
- - .new-e2e_agent_a6
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-centos-fips-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-iot-agent
-
-new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-dogstatsd
-
new-e2e-agent-platform-step-by-step-centos-a6-x86_64:
stage: kitchen_testing
extends:
@@ -142,19 +37,7 @@ new-e2e-agent-platform-step-by-step-centos-a6-x86_64:
- .new-e2e_os_centos
- .new-e2e_centos_a6_x86_64
- .new-e2e_agent_a6
- rules: !reference [.on_deploy_a6]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-centos-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_deploy_a7]
+ rules: !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -169,83 +52,22 @@ new-e2e-agent-platform-install-script-upgrade6-centos-x86_64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-upgrade7-centos-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-centos-iot-agent-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_centos
- - .new-e2e_centos_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
- END_MAJOR_VERSION: [7]
-
-new-e2e-agent-platform-install-script-upgrade7-centos-fips-iot-agent-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
- END_MAJOR_VERSION: [7]
-
-new-e2e-agent-platform-install-script-upgrade6-centos-fips-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade6
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a6_x86_64
- - .new-e2e_agent_a6
- variables:
- FLAVOR: datadog-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [6]
- END_MAJOR_VERSION: [6]
-
-new-e2e-agent-platform-install-script-upgrade7-centos-fips-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_centos
- - .new-e2e_centos-fips_a7_x86_64
- - .new-e2e_agent_a7
+.new-e2e_centos6_a6_x86_64:
variables:
- FLAVOR: datadog-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [6, 7]
- END_MAJOR_VERSION: [7]
+ E2E_ARCH: x86_64
+ E2E_OSVERS: "centos-610"
+ E2E_BRANCH_OSVERS: "centos-610"
+ E2E_OVERRIDE_INSTANCE_TYPE: "t2.medium" # CentOS 6 does not support ENA, so we cannot use t3 instances
+ needs:
+ - go_tools_deps
+ - deploy_rpm_testing-a6_x64
-new-e2e-agent-platform-rpm-centos6-a7-x86_64:
+new-e2e-agent-platform-rpm-centos6-a6-x86_64:
stage: kitchen_testing
extends:
- .new_e2e_template
- .new-e2e_rpm
- .new-e2e_os_centos
- - .new-e2e_centos6_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
+ - .new-e2e_centos6_a6_x86_64
+ - .new-e2e_agent_a6
+ rules: !reference [.on_default_new-e2e_tests]
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/debian.yml b/.gitlab/kitchen_testing/new-e2e_testing/debian.yml
index 7ab6e8dfed77c..25e91db088595 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/debian.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/debian.yml
@@ -18,22 +18,6 @@
E2E_BRANCH_OSVERS: "debian-10"
needs: ["deploy_deb_testing-a6_arm64"]
-.new-e2e_debian_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "debian-9,debian-10,debian-11,debian-12"
- E2E_CWS_SUPPORTED_OSVERS: "debian-10,debian-11"
- E2E_BRANCH_OSVERS: "debian-11"
- needs: ["deploy_deb_testing-a7_x64"]
-
-.new-e2e_debian_a7_arm64:
- variables:
- E2E_ARCH: arm64
- E2E_OSVERS: "debian-10"
- E2E_CWS_SUPPORTED_OSVERS: "debian-10"
- E2E_BRANCH_OSVERS: "debian-10"
- needs: ["deploy_deb_testing-a7_arm64"]
-
new-e2e-agent-platform-install-script-debian-a6-x86_64:
stage: kitchen_testing
extends:
@@ -56,53 +40,6 @@ new-e2e-agent-platform-install-script-debian-a6-arm64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-debian-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-debian-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_arm64
- - .new-e2e_agent_a7
- rules: !reference [.on_all_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-debian-iot-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-iot-agent
-
-new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-dogstatsd
-
new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64:
stage: kitchen_testing
extends:
@@ -114,48 +51,37 @@ new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64:
variables:
FLAVOR: datadog-heroku-agent
-new-e2e-agent-platform-install-script-debian-heroku-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-heroku-agent
-
-new-e2e-agent-platform-package-signing-debian-a7-x86_64:
+new-e2e-agent-platform-package-signing-debian-a6-x86_64:
stage: kitchen_testing
extends:
- .new_e2e_template
- - .new-e2e_debian_a7_x86_64
+ - .new-e2e_debian_a6_x86_64
- .new-e2e_package_signing
- rules: !reference [.on_default_new-e2e_tests_a7]
+ rules: !reference [.on_default_new-e2e_tests]
-new-e2e-agent-platform-step-by-step-debian-a7-x64:
+new-e2e-agent-platform-step-by-step-debian-a6-x64:
stage: kitchen_testing
extends:
- .new_e2e_template
- .new-e2e_step_by_step
- .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
+ - .new-e2e_debian_a6_x86_64
+ - .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a7]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-step-by-step-debian-a7-arm64:
+new-e2e-agent-platform-step-by-step-debian-a6-arm64:
stage: kitchen_testing
extends:
- .new_e2e_template
- .new-e2e_step_by_step
- .new-e2e_os_debian
- - .new-e2e_debian_a7_arm64
- - .new-e2e_agent_a7
+ - .new-e2e_debian_a6_arm64
+ - .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a7]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -168,20 +94,7 @@ new-e2e-agent-platform-step-by-step-debian-a6-x86_64:
- .new-e2e_debian_a6_x86_64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-debian-a6-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_debian
- - .new-e2e_debian_a6_arm64
- - .new-e2e_agent_a6
- rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -195,29 +108,3 @@ new-e2e-agent-platform-install-script-upgrade6-debian-x86_64:
- .new-e2e_agent_a6
variables:
FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-debian-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-debian-iot-agent-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_debian
- - .new-e2e_debian_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
- END_MAJOR_VERSION: [7]
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/include.yml b/.gitlab/kitchen_testing/new-e2e_testing/include.yml
index 78f6d77e5074e..30ab8be78521c 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/include.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/include.yml
@@ -3,9 +3,9 @@
# Contains jobs which run new-e2e tests on the Agent packages.
include:
- - /.gitlab/kitchen_testing/new-e2e_testing/debian.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/centos.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/suse.yml
- - /.gitlab/kitchen_testing/new-e2e_testing/windows.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/debian.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/amazonlinux.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/centos.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/suse.yml
+ - .gitlab/kitchen_testing/new-e2e_testing/windows.yml
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/suse.yml b/.gitlab/kitchen_testing/new-e2e_testing/suse.yml
index 2cef2035fa42e..14b7d505186f5 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/suse.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/suse.yml
@@ -16,21 +16,13 @@
E2E_BRANCH_OSVERS: "sles-15"
needs: ["deploy_suse_rpm_testing_x64-a6"]
-.new-e2e_suse_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "sles-12,sles-15"
- E2E_CWS_SUPPORTED_OSVERS: "sles-12,sles-15"
- E2E_BRANCH_OSVERS: "sles-15"
- needs: ["deploy_suse_rpm_testing_x64-a7"]
-
-.new-e2e_suse_a7_arm64:
+.new-e2e_suse_a6_arm64:
variables:
E2E_ARCH: arm64
E2E_OSVERS: "sles-15"
E2E_CWS_SUPPORTED_OSVERS: "sles-15"
E2E_BRANCH_OSVERS: "sles-15"
- needs: ["deploy_suse_rpm_testing_arm64-a7"]
+ needs: ["deploy_suse_rpm_testing_arm64-a6"]
new-e2e-agent-platform-install-script-suse-a6-x86_64:
stage: kitchen_testing
@@ -43,60 +35,24 @@ new-e2e-agent-platform-install-script-suse-a6-x86_64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-suse-a7-x86_64:
+new-e2e-agent-platform-install-script-suse-a6-arm64:
stage: kitchen_testing
extends:
- .new_e2e_template
- .new-e2e_install_script
- .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-suse-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_arm64
- - .new-e2e_agent_a7
- rules: !reference [.on_all_new-e2e_tests_a7]
+ - .new-e2e_suse_a6_arm64
+ - .new-e2e_agent_a6
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-suse-iot-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- rules: !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-iot-agent
-
-new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-dogstatsd
-
-new-e2e-agent-platform-package-signing-suse-a7-x86_64:
+new-e2e-agent-platform-package-signing-suse-a6-x86_64:
stage: kitchen_testing
extends:
- .new_e2e_template
- - .new-e2e_suse_a7_x86_64
+ - .new-e2e_suse_a6_x86_64
- .new-e2e_package_signing
- rules: !reference [.on_default_new-e2e_tests_a7]
+ rules: !reference [.on_default_new-e2e_tests]
new-e2e-agent-platform-step-by-step-suse-a6-x86_64:
stage: kitchen_testing
@@ -107,56 +63,28 @@ new-e2e-agent-platform-step-by-step-suse-a6-x86_64:
- .new-e2e_suse_a6_x86_64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-step-by-step-suse-a7-x86_64:
+new-e2e-agent-platform-step-by-step-suse-a6-arm64:
stage: kitchen_testing
extends:
- .new_e2e_template
- .new-e2e_step_by_step
- .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_deploy_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-suse-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_arm64
- - .new-e2e_agent_a7
+ - .new-e2e_suse_a6_arm64
+ - .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a7]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-upgrade7-suse-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [6,7]
- END_MAJOR_VERSION: [7]
-
new-e2e-agent-platform-install-script-upgrade6-suse-x86_64:
stage: kitchen_testing
extends:
- .new_e2e_template
- - .new-e2e_script_upgrade7
+ - .new-e2e_script_upgrade6
- .new-e2e_os_suse
- .new-e2e_suse_a6_x86_64
- .new-e2e_agent_a6
@@ -165,19 +93,4 @@ new-e2e-agent-platform-install-script-upgrade6-suse-x86_64:
parallel:
matrix:
- START_MAJOR_VERSION: [6]
- END_MAJOR_VERSION: [6]
-
-new-e2e-agent-platform-install-script-upgrade7-suse-iot-agent-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_suse
- - .new-e2e_suse_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
END_MAJOR_VERSION: [7]
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml b/.gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml
index a30f6908f1a29..94413acf12a9a 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/ubuntu.yml
@@ -31,22 +31,6 @@
E2E_BRANCH_OSVERS: "ubuntu-20-04"
needs: ["deploy_deb_testing-a6_arm64"]
-.new-e2e_ubuntu_a7_x86_64:
- variables:
- E2E_ARCH: x86_64
- E2E_OSVERS: "ubuntu-14-04,ubuntu-16-04,ubuntu-18-04,ubuntu-20-04,ubuntu-22-04"
- E2E_CWS_SUPPORTED_OSVERS: "ubuntu-18-04,ubuntu-20-04,ubuntu-22-04"
- E2E_BRANCH_OSVERS: "ubuntu-22-04"
- needs: ["deploy_deb_testing-a7_x64"]
-
-.new-e2e_ubuntu_a7_arm64:
- variables:
- E2E_ARCH: arm64
- E2E_OSVERS: "ubuntu-18-04,ubuntu-20-04"
- E2E_CWS_SUPPORTED_OSVERS: "ubuntu-18-04,ubuntu-20-04"
- E2E_BRANCH_OSVERS: "ubuntu-20-04"
- needs: ["deploy_deb_testing-a7_arm64"]
-
new-e2e-agent-platform-install-script-ubuntu-a6-x86_64:
stage: kitchen_testing
extends:
@@ -69,54 +53,6 @@ new-e2e-agent-platform-install-script-ubuntu-a6-arm64:
variables:
FLAVOR: datadog-agent
-new-e2e-agent-platform-install-script-ubuntu-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_default_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-ubuntu-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_arm64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_all_new-e2e_tests_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-ubuntu-iot-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
-
-new-e2e-agent-platform-install-script-ubuntu-dogstatsd-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-dogstatsd
-
new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64:
stage: kitchen_testing
extends:
@@ -128,17 +64,6 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64:
variables:
FLAVOR: datadog-heroku-agent
-new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_install_script
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-heroku-agent
-
new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64:
stage: kitchen_testing
extends:
@@ -148,7 +73,7 @@ new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64:
- .new-e2e_ubuntu_a6_x86_64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -161,44 +86,7 @@ new-e2e-agent-platform-step-by-step-ubuntu-a6-arm64:
- .new-e2e_ubuntu_a6_arm64
- .new-e2e_agent_a6
rules:
- !reference [.on_deploy_a6]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_deploy_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_step_by_step
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_arm64
- - .new-e2e_agent_a7
- rules:
- !reference [.on_deploy_a7]
- variables:
- FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-ubuntu-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
+ !reference [.on_deploy]
variables:
FLAVOR: datadog-agent
@@ -212,18 +100,3 @@ new-e2e-agent-platform-install-script-upgrade6-ubuntu-x86_64:
- .new-e2e_agent_a6
variables:
FLAVOR: datadog-agent
-
-new-e2e-agent-platform-install-script-upgrade7-ubuntu-iot-agent-x86_64:
- stage: kitchen_testing
- extends:
- - .new_e2e_template
- - .new-e2e_script_upgrade7
- - .new-e2e_os_ubuntu
- - .new-e2e_ubuntu_a7_x86_64
- - .new-e2e_agent_a7
- variables:
- FLAVOR: datadog-iot-agent
- parallel:
- matrix:
- - START_MAJOR_VERSION: [7]
- END_MAJOR_VERSION: [7]
diff --git a/.gitlab/kitchen_testing/new-e2e_testing/windows.yml b/.gitlab/kitchen_testing/new-e2e_testing/windows.yml
index 281e87d84990e..160964974a6e7 100644
--- a/.gitlab/kitchen_testing/new-e2e_testing/windows.yml
+++ b/.gitlab/kitchen_testing/new-e2e_testing/windows.yml
@@ -56,52 +56,33 @@ new-e2e-windows-agent-msi-windows-server-a6-x86_64:
- .new-e2e_windows_a6_x86_64
- .new-e2e_windows_installer_tests
rules:
- - !reference [.on_deploy_a6]
+ - !reference [.on_deploy]
- !reference [.on_windows_installer_changes_or_manual]
-# Agent 7
-.new-e2e_windows_a7_x86_64:
- variables:
- WINDOWS_AGENT_ARCH: "x86_64"
- extends:
- - .new-e2e_windows_msi
- - .new-e2e_agent_a7
- needs: ["deploy_windows_testing-a7"]
-
-## full tests
-new-e2e-windows-agent-msi-windows-server-a7-x86_64:
- stage: kitchen_testing
- extends:
- - .new-e2e_windows_a7_x86_64
- - .new-e2e_windows_installer_tests
- rules:
- - !reference [.on_deploy_a7]
- - !reference [.on_windows_installer_changes_or_manual]
-
-new-e2e-windows-agent-domain-tests-a7-x86_64:
+new-e2e-windows-agent-domain-tests-a6-x86_64:
stage: kitchen_testing
variables:
WINDOWS_AGENT_ARCH: "x86_64"
extends:
- .new_e2e_template
- .new-e2e_windows_domain_test
- - .new-e2e_agent_a7
- needs: ["deploy_windows_testing-a7"]
+ - .new-e2e_agent_a6
+ needs: ["deploy_windows_testing-a6"]
rules:
- - !reference [.on_deploy_a7]
+ - !reference [.on_deploy]
- !reference [.on_windows_installer_changes_or_manual]
## single test for PRs
## skipped if the full tests are running
-new-e2e-windows-agent-msi-upgrade-windows-server-a7-x86_64:
+new-e2e-windows-agent-msi-upgrade-windows-server-a6-x86_64:
stage: kitchen_testing
extends:
- .new-e2e_windows_msi
- - .new-e2e_windows_a7_x86_64
+ - .new-e2e_windows_a6_x86_64
rules:
- !reference [.except_main_or_release_branch]
- !reference [.except_windows_installer_changes]
- - !reference [.on_default_new-e2e_tests_a7]
+ - !reference [.on_default_new-e2e_tests]
# must be last since it ends with when: on_success
- !reference [.except_deploy]
variables:
diff --git a/.gitlab/kitchen_testing/suse.yml b/.gitlab/kitchen_testing/suse.yml
index 7a1fa74f503f2..8e0dd1142a3b2 100644
--- a/.gitlab/kitchen_testing/suse.yml
+++ b/.gitlab/kitchen_testing/suse.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
# Kitchen: OSes
@@ -22,18 +22,18 @@
# Kitchen: scenarios (os * agent * (cloud + arch))
# -------------------------------
-.kitchen_scenario_suse_x64_a7:
+.kitchen_scenario_suse_x64_a6:
extends:
- - .kitchen_agent_a7
+ - .kitchen_agent_a6
- .kitchen_os_suse
- .kitchen_azure_x64
- needs: ["deploy_suse_rpm_testing_x64-a7"]
+ needs: ["deploy_suse_rpm_testing_x64-a6"]
# Kitchen: final test matrix (tests * scenarios)
# ----------------------------------------------
-kitchen_suse_process_agent_x64-a7:
+kitchen_suse_process_agent_x64-a6:
extends:
- - .kitchen_scenario_suse_x64_a7
+ - .kitchen_scenario_suse_x64_a6
- .kitchen_test_process_agent
diff --git a/.gitlab/kitchen_testing/testing.yml b/.gitlab/kitchen_testing/testing.yml
index 6f6a59555f556..1706c19e007d2 100644
--- a/.gitlab/kitchen_testing/testing.yml
+++ b/.gitlab/kitchen_testing/testing.yml
@@ -80,7 +80,7 @@
.kitchen_agent_a6:
extends: .kitchen_common_with_junit
rules:
- !reference [.on_kitchen_tests_a6]
+ !reference [.on_kitchen_tests]
variables:
AGENT_MAJOR_VERSION: 6
DD_PIPELINE_ID: $CI_PIPELINE_ID-a6
@@ -88,7 +88,7 @@
.kitchen_agent_a7:
extends: .kitchen_common_with_junit
rules:
- !reference [.on_kitchen_tests_a7]
+ !reference [.on_kitchen_tests]
variables:
AGENT_MAJOR_VERSION: 7
DD_PIPELINE_ID: $CI_PIPELINE_ID-a7
diff --git a/.gitlab/kitchen_testing/ubuntu.yml b/.gitlab/kitchen_testing/ubuntu.yml
index 24c91e794a4e8..c02eb321a3a46 100644
--- a/.gitlab/kitchen_testing/ubuntu.yml
+++ b/.gitlab/kitchen_testing/ubuntu.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_common/testing.yml
+# - .gitlab/kitchen_common/testing.yml
# Kitchen: OSes
# -------------
@@ -18,25 +18,25 @@
# Kitchen: scenarios (os * agent * (cloud + arch))
# -------------------------------
-.kitchen_scenario_ubuntu_a7_x64:
+.kitchen_scenario_ubuntu_a6_x64:
variables:
KITCHEN_OSVERS: "ubuntu-14-04,ubuntu-16-04,ubuntu-18-04,ubuntu-20-04,ubuntu-22-04"
KITCHEN_CWS_SUPPORTED_OSVERS: "ubuntu-18-04,ubuntu-20-04,ubuntu-22-04"
DEFAULT_KITCHEN_OSVERS: "ubuntu-22-04"
extends:
- - .kitchen_agent_a7
+ - .kitchen_agent_a6
- .kitchen_os_ubuntu
- .kitchen_azure_x64
- needs: ["deploy_deb_testing-a7_x64"]
+ needs: ["deploy_deb_testing-a6_x64"]
# Kitchen: final test matrix (tests * scenarios)
# ----------------------------------------------
-kitchen_ubuntu_process_agent-a7:
+kitchen_ubuntu_process_agent-a6:
variables:
KITCHEN_OSVERS: "ubuntu-20-04"
DEFAULT_KITCHEN_OSVERS: "ubuntu-20-04"
extends:
- - .kitchen_scenario_ubuntu_a7_x64
+ - .kitchen_scenario_ubuntu_a6_x64
- .kitchen_test_process_agent
allow_failure: true
diff --git a/.gitlab/kitchen_testing/windows.yml b/.gitlab/kitchen_testing/windows.yml
index 4e3ce60f9871f..c03e1072f276f 100644
--- a/.gitlab/kitchen_testing/windows.yml
+++ b/.gitlab/kitchen_testing/windows.yml
@@ -3,7 +3,7 @@
# For now, the workaround is to include "common" files once in the top-level .gitlab-ci.yml file
# See: https://gitlab.com/gitlab-org/gitlab/-/issues/28987
# include:
-# - /.gitlab/kitchen_testing/testing.yml
+# - .gitlab/kitchen_testing/testing.yml
# Kitchen: OSes
# -------------
@@ -16,7 +16,7 @@
KITCHEN_OSVERS: "win2016,win2019,win2019cn,win2022"
DEFAULT_KITCHEN_OSVERS: "win2022"
before_script: # Note: if you are changing this, remember to also change .kitchen_test_windows_installer, which has a copy of this with less TEST_PLATFORMS defined.
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
+ - export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
- cd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
# Windows kitchen tests are slower and more fragile (lots of WinRM::WinRMAuthorizationError and/or execution expired errors)
@@ -34,7 +34,7 @@
KITCHEN_PLATFORM: "windows"
KITCHEN_OSVERS: "win2016"
before_script: # Use a smaller set of TEST_PLATFORMS than .kitchen_os_windows
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
+ - export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
- cd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
script:
@@ -46,11 +46,11 @@
extends:
- .kitchen_azure_x64
before_script: # test all of the kernels to make sure the driver loads and runs properly
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export RELEASE_VERSION=$RELEASE_VERSION_7; else export RELEASE_VERSION=$RELEASE_VERSION_6; fi
- - export WINDOWS_DDPROCMON_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_DRIVER")
- - export WINDOWS_DDPROCMON_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_VERSION")
- - export WINDOWS_DDPROCMON_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_7::WINDOWS_DDPROCMON_SHASUM")
+ - WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
+ - RELEASE_VERSION=$RELEASE_VERSION_6
+ - export WINDOWS_DDPROCMON_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_DRIVER")
+ - export WINDOWS_DDPROCMON_VERSION=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_VERSION")
+ - export WINDOWS_DDPROCMON_SHASUM=$(inv release.get-release-json-value "$RELEASE_VERSION_6::WINDOWS_DDPROCMON_SHASUM")
- export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION::WINDOWS_DDNPM_DRIVER")
- cd $DD_AGENT_TESTING_DIR
@@ -72,8 +72,8 @@
extends:
- .kitchen_azure_x64
before_script: # test all of the kernels to make sure the driver loads and runs properly
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export RELEASE_VERSION=$RELEASE_VERSION_7; else export RELEASE_VERSION=$RELEASE_VERSION_6; fi
+ - export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
+ - export RELEASE_VERSION=$RELEASE_VERSION_6
- export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION::WINDOWS_DDNPM_DRIVER")
- cd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
@@ -95,8 +95,8 @@
KITCHEN_PLATFORM: "windows"
KITCHEN_OSVERS: "win2016"
before_script: # Use only 2016 and 2019 for testing that we upgrade properly and don't install the driver when not specified
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A7; else export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6; fi
- - if [ $AGENT_MAJOR_VERSION == "7" ]; then export RELEASE_VERSION=$RELEASE_VERSION_7; else export RELEASE_VERSION=$RELEASE_VERSION_6; fi
+ - export WINDOWS_TESTING_S3_BUCKET=$WINDOWS_TESTING_S3_BUCKET_A6
+ - export RELEASE_VERSION=$RELEASE_VERSION_6
- export WINDOWS_DDNPM_DRIVER=$(inv release.get-release-json-value "$RELEASE_VERSION::WINDOWS_DDNPM_DRIVER")
- cd $DD_AGENT_TESTING_DIR
- tasks/kitchen_setup.sh
@@ -134,26 +134,20 @@
- .kitchen_os_windows
needs: ["deploy_windows_testing-a6"]
-.kitchen_scenario_windows_a7:
- extends:
- - .kitchen_agent_a7
- - .kitchen_os_windows
- needs: ["deploy_windows_testing-a7"]
-
# Kitchen: final test matrix (test types * scenarios)
# ----------------------------------------------
-kitchen_windows_installer_npm_install_scenarios-a7:
+kitchen_windows_installer_npm_install_scenarios-a6:
extends:
- - .kitchen_scenario_windows_a7
+ - .kitchen_scenario_windows_a6
- .kitchen_test_windows_installer_npm
-kitchen_windows_installer_npm_driver-a7:
+kitchen_windows_installer_npm_driver-a6:
# Run NPM driver installer test on branches, on a reduced number of platforms
rules:
- !reference [.on_default_kitchen_tests_a7]
+ !reference [.on_default_kitchen_tests]
extends:
- - .kitchen_scenario_windows_a7
+ - .kitchen_scenario_windows_a6
- .kitchen_test_windows_installer_driver
kitchen_windows_installer_agent-a6:
@@ -161,38 +155,23 @@ kitchen_windows_installer_agent-a6:
- .kitchen_scenario_windows_a6
- .kitchen_test_windows_installer_agent
-kitchen_windows_installer_agent-a7:
- extends:
- - .kitchen_scenario_windows_a7
- - .kitchen_test_windows_installer_agent
-
-kitchen_windows_upgrade5_agent-a6:
- extends:
- - .kitchen_scenario_windows_a6
- - .kitchen_test_upgrade5_agent
-
-kitchen_windows_upgrade5_agent-a7:
- extends:
- - .kitchen_scenario_windows_a7
- - .kitchen_test_upgrade5_agent
-
kitchen_windows_upgrade6_agent-a7:
extends:
- - .kitchen_scenario_windows_a7
+ - .kitchen_scenario_windows_a6
- .kitchen_test_upgrade6_agent
-kitchen_windows_process_agent-a7:
+kitchen_windows_process_agent-a6:
variables:
KITCHEN_OSVERS: "win2022"
DEFAULT_KITCHEN_OSVERS: "win2022"
extends:
- - .kitchen_scenario_windows_a7
+ - .kitchen_scenario_windows_a6
- .kitchen_test_process_agent
-kitchen_windows_installer_cws-a7:
+kitchen_windows_installer_cws-a6:
# Run NPM driver installer test on branches, on a reduced number of platforms
rules:
- !reference [.on_default_kitchen_tests_a7]
+ !reference [.on_default_kitchen_tests]
extends:
- - .kitchen_scenario_windows_a7
+ - .kitchen_scenario_windows_a6
- .kitchen_test_windows_cws
diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml
index af292b2e0506e..002452b3c13e9 100644
--- a/.gitlab/maintenance_jobs/docker.yml
+++ b/.gitlab/maintenance_jobs/docker.yml
@@ -1,11 +1,11 @@
---
include:
- - /.gitlab/common/container_publish_job_templates.yml
+ - .gitlab/common/container_publish_job_templates.yml
#
# Use these steps to revert the latest tags to a previous release
# while maintaining content trust signatures
-# - Create a pipeline on main with the RELEASE_6 and/or RELEASE_7 env vars
+# - Create a pipeline on main with the RELEASE_6 env vars
# - in the gitlab pipeline view, trigger the step (in the first column)
#
revert_latest_6:
@@ -22,28 +22,6 @@ revert_latest_6:
- IMG_SOURCES: datadog/agent:${NEW_LATEST_RELEASE_6}-jmx
IMG_DESTINATIONS: agent:6-jmx,agent:latest-py2-jmx
-revert_latest_7:
- extends: .docker_publish_job_definition
- rules: !reference [.on_main_manual]
- stage: maintenance_jobs
- variables:
- NEW_LATEST_RELEASE_7: "" # tag name of the non-jmx version, for example "7.21.0"
- IMG_REGISTRIES: public
- parallel:
- matrix:
- - IMG_SOURCES: datadog/agent:${NEW_LATEST_RELEASE_7}
- IMG_DESTINATIONS: agent:7,agent:latest
- - IMG_SOURCES: datadog/agent:${NEW_LATEST_RELEASE_7}-jmx
- IMG_DESTINATIONS: agent:7-jmx,agent:latest-jmx
- - IMG_SOURCES: datadog/agent:${NEW_LATEST_RELEASE_7}-servercore
- IMG_DESTINATIONS: agent:7-servercore,agent:latest-servercore
- - IMG_SOURCES: datadog/agent:${NEW_LATEST_RELEASE_7}-servercore-jmx
- IMG_DESTINATIONS: agent:7-servercore-jmx,agent:latest-servercore-jmx
- - IMG_SOURCES: datadog/dogstatsd:${NEW_LATEST_RELEASE_7}
- IMG_DESTINATIONS: dogstatsd:7,dogstatsd:latest
- - IMG_SOURCES: datadog/cluster-agent:${NEW_LATEST_RELEASE_7}
- IMG_DESTINATIONS: cluster-agent:latest
-
#
# Use this step to delete a tag of a given image
# We call the Docker Hub API because docker cli doesn't support deleting tags
diff --git a/.gitlab/maintenance_jobs/include.yml b/.gitlab/maintenance_jobs/include.yml
index dfc155470c9bd..c8c7ddf8d2634 100644
--- a/.gitlab/maintenance_jobs/include.yml
+++ b/.gitlab/maintenance_jobs/include.yml
@@ -4,5 +4,5 @@
# as well as jobs which periodically clean up kitchen resources.
include:
- - /.gitlab/maintenance_jobs/docker.yml
- - /.gitlab/maintenance_jobs/kitchen.yml
+ - .gitlab/maintenance_jobs/docker.yml
+ - .gitlab/maintenance_jobs/kitchen.yml
diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml
index ef7dbdd8f02f3..b4cc554fbbee9 100644
--- a/.gitlab/notify/notify.yml
+++ b/.gitlab/notify/notify.yml
@@ -63,7 +63,7 @@ notify_github:
- !reference [.except_mergequeue]
- !reference [.except_main_or_release_branch]
- !reference [.except_no_tests_no_deploy]
- - if: $RELEASE_VERSION_7 != ""
+ - if: $RELEASE_VERSION_6 != ""
changes:
paths:
- '**/*.go'
@@ -71,7 +71,7 @@ notify_github:
when: on_success
- when: never
needs:
- - job: "deploy_deb_testing-a7_x64"
+ - job: "deploy_deb_testing-a6_x64"
optional: true
dependencies: []
allow_failure: true
diff --git a/.gitlab/package_build/deb.yml b/.gitlab/package_build/deb.yml
index 4e2e6ff546a5e..7ec763bb7f68d 100644
--- a/.gitlab/package_build/deb.yml
+++ b/.gitlab/package_build/deb.yml
@@ -36,31 +36,11 @@
paths:
- $OMNIBUS_PACKAGE_DIR
-.updater_build_common_deb:
- script:
- - source /root/.bashrc
- - !reference [.setup_ruby_mirror_linux]
- - !reference [.retrieve_linux_go_deps]
- - echo "About to build for $RELEASE_VERSION"
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- - inv -e updater.omnibus-build --release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-updater_*_${PACKAGE_ARCH}.deb $S3_ARTIFACTS_URI/$DESTINATION_DEB
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-updater-dbg_*_${PACKAGE_ARCH}.deb $S3_ARTIFACTS_URI/$DESTINATION_DBG_DEB
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
agent_deb-x64-a6:
extends: .agent_build_common_deb
rules:
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -80,35 +60,11 @@ agent_deb-x64-a6:
before_script:
- export RELEASE_VERSION=$RELEASE_VERSION_6
-agent_deb-x64-a7:
- extends: .agent_build_common_deb
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-x64",
- "go_deps",
- "generate_minimized_btfs_x64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: amd64
- DESTINATION_DEB: "datadog-agent_7_amd64.deb"
- DESTINATION_DBG_DEB: "datadog-agent-dbg_7_amd64.deb"
- before_script:
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
agent_deb-arm64-a6:
extends: .agent_build_common_deb
rules:
- - !reference [.on_all_builds_a6]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
tags: ["arch:arm64"]
@@ -128,251 +84,9 @@ agent_deb-arm64-a6:
before_script:
- export RELEASE_VERSION=$RELEASE_VERSION_6
-agent_deb-arm64-a7:
- extends: .agent_build_common_deb
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-arm64",
- "go_deps",
- "generate_minimized_btfs_arm64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: arm64
- DESTINATION_DEB: "datadog-agent_7_arm64.deb"
- DESTINATION_DBG_DEB: "datadog-agent-dbg_7_arm64.deb"
- before_script:
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-.iot_agent_build_common_deb:
- script:
- - source /root/.bashrc
- - !reference [.setup_ruby_mirror_linux]
- - !reference [.setup_python_mirror_linux]
- - !reference [.retrieve_linux_go_deps]
- - echo "About to build for $RELEASE_VERSION_7"
- - echo "Detected host architecture $(uname -m)"
- # $DD_TARGET_ARCH is only set by Arm build images, so assume amd64 if not present
- - echo "Target architecture ${DD_TARGET_ARCH:=amd64}"
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agent.omnibus-build --flavor iot --log-level debug --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-iot-agent*_${PACKAGE_ARCH}.deb $S3_ARTIFACTS_URI/$DESTINATION_DEB
- - !reference [.upload_sbom_artifacts]
- variables:
- KUBERNETES_CPU_REQUEST: 8
- KUBERNETES_MEMORY_REQUEST: "16Gi"
- KUBERNETES_MEMORY_LIMIT: "16Gi"
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-iot_agent_deb-x64:
- extends: .iot_agent_build_common_deb
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "go_deps"]
- variables:
- PACKAGE_ARCH: amd64
- DESTINATION_DEB: "datadog-iot-agent_7_amd64.deb"
-
-iot_agent_deb-arm64:
- extends: .iot_agent_build_common_deb
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "go_deps"]
- variables:
- PACKAGE_ARCH: arm64
- DESTINATION_DEB: "datadog-iot-agent_7_arm64.deb"
-
-iot_agent_deb-armhf:
- extends: .iot_agent_build_common_deb
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_armhf$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- # Run with platform:arm64 since no platform:armhf exists and arm64 should be backwards compatible
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "go_deps"]
- variables:
- PACKAGE_ARCH: armhf
- DESTINATION_DEB: "datadog-iot-agent_7_armhf.deb"
-
-dogstatsd_deb-x64:
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_dogstatsd-binary_x64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e dogstatsd.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-dogstatsd*_amd64.deb $S3_ARTIFACTS_URI/datadog-dogstatsd_amd64.deb
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-dogstatsd_deb-arm64:
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "build_dogstatsd-binary_arm64", "go_deps"]
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e dogstatsd.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-dogstatsd*_arm64.deb $S3_ARTIFACTS_URI/datadog-dogstatsd_arm64.deb
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-updater_deb-amd64:
- extends: .updater_build_common_deb
- rules: !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "go_deps", "generate_minimized_btfs_x64"]
- variables:
- AGENT_MAJOR_VERSION: 7
- PACKAGE_ARCH: amd64
- DESTINATION_DEB: "datadog-updater_7_amd64.deb"
- DESTINATION_DBG_DEB: "datadog-updater-dbg_7_amd64.deb"
- before_script:
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-updater_deb-arm64:
- extends: .updater_build_common_deb
- rules: !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "go_deps", "generate_minimized_btfs_arm64"]
- variables:
- AGENT_MAJOR_VERSION: 7
- PACKAGE_ARCH: arm64
- DESTINATION_DEB: "datadog-updater_7_arm64.deb"
- DESTINATION_DBG_DEB: "datadog-updater-dbg_7_arm64.deb"
- before_script:
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-agentless_scanner_deb-x64:
- rules:
- !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_agentless_scanner-binary_x64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agentless-scanner.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-agentless-scanner*_amd64.deb $S3_ARTIFACTS_URI/datadog-agentless-scanner_amd64.deb
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-agentless_scanner_deb-arm64:
- rules:
- !reference [.on_all_builds_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "build_agentless_scanner-binary_arm64", "go_deps"]
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - *setup_deb_signing_key
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agentless-scanner.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-agentless-scanner*_arm64.deb $S3_ARTIFACTS_URI/datadog-agentless-scanner_arm64.deb
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
agent_heroku_deb-x64-a6:
extends: agent_deb-x64-a6
variables:
DESTINATION_DEB: "datadog-heroku-agent_6_amd64.deb"
DESTINATION_DBG_DEB: "datadog-heroku-agent-dbg_6_amd64.deb"
FLAVOR: heroku
-
-agent_heroku_deb-x64-a7:
- extends: agent_deb-x64-a7
- variables:
- DESTINATION_DEB: "datadog-heroku-agent_7_amd64.deb"
- DESTINATION_DBG_DEB: "datadog-heroku-agent-dbg_7_amd64.deb"
- FLAVOR: heroku
diff --git a/.gitlab/package_build/dmg.yml b/.gitlab/package_build/dmg.yml
deleted file mode 100644
index 7786fbfceb4df..0000000000000
--- a/.gitlab/package_build/dmg.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-.agent_build_common_dmg:
- script:
- - echo "About to build for $RELEASE_VERSION"
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - mkdir -p $OMNIBUS_PACKAGE_DIR
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME)
- - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache .
- - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -)
- - !reference [.setup_python_mirror_linux]
- - python3 -m pip install -r tasks/libs/requirements-github.txt
- - inv -e github.trigger-macos --workflow-type "build" --datadog-agent-ref "$CI_COMMIT_SHA" --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --destination "$OMNIBUS_PACKAGE_DIR" --version-cache "$VERSION_CACHE_CONTENT" --integrations-core-ref "$INTEGRATIONS_CORE_VERSION"
- - !reference [.upload_sbom_artifacts]
- timeout: 3h # MacOS builds can take 1h~2h, increase the timeout to avoid timeout flakes
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-agent_dmg-x64-a7:
- extends: .agent_build_common_dmg
- rules:
- - !reference [.on_packaging_change]
- - !reference [.on_main_or_release_branch]
- - !reference [.on_all_builds]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check"]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- timeout: 6h
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
diff --git a/.gitlab/package_build/include.yml b/.gitlab/package_build/include.yml
index fb9bb422bca5b..fe98c18dcb9ab 100644
--- a/.gitlab/package_build/include.yml
+++ b/.gitlab/package_build/include.yml
@@ -10,9 +10,7 @@
- If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" }
include:
- - /.gitlab/package_build/deb.yml
- - /.gitlab/package_build/dmg.yml
- - /.gitlab/package_build/remote_updater.yml
- - /.gitlab/package_build/rpm.yml
- - /.gitlab/package_build/suse_rpm.yml
- - /.gitlab/package_build/windows.yml
+ - .gitlab/package_build/deb.yml
+ - .gitlab/package_build/rpm.yml
+ - .gitlab/package_build/suse_rpm.yml
+ - .gitlab/package_build/windows.yml
diff --git a/.gitlab/package_build/remote_updater.yml b/.gitlab/package_build/remote_updater.yml
deleted file mode 100644
index c6ee8f8c17420..0000000000000
--- a/.gitlab/package_build/remote_updater.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-.agent_build_common_remote_updater:
- script:
- - echo "About to build for $RELEASE_VERSION"
- - export INSTALL_DIR=/opt/datadog-packages/datadog-agent/$(inv agent.version -u)-1
- - !reference [.setup_ruby_mirror_linux]
- - !reference [.setup_python_mirror_linux]
- - !reference [.retrieve_linux_go_deps]
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - tar -xf $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz
- - mkdir -p /tmp/system-probe
- - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/clang-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/clang-bpf
- - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/llc-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/llc-bpf
- - cp $CI_PROJECT_DIR/minimized-btfs.tar.xz /tmp/system-probe/minimized-btfs.tar.xz
- - chmod 0744 /tmp/system-probe/clang-bpf /tmp/system-probe/llc-bpf
- # NOTE: for now, we consider "ociru" to be a "redhat_target" in omnibus/lib/ostools.rb
- # if we ever start building on a different platform, that might need to change
- - inv -e agent.omnibus-build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod" --system-probe-bin=/tmp/system-probe --host-distribution=ociru
- - ls -la $OMNIBUS_PACKAGE_DIR
- - !reference [.upload_sbom_artifacts]
- variables:
- KUBERNETES_CPU_REQUEST: 16
- KUBERNETES_MEMORY_REQUEST: "32Gi"
- KUBERNETES_MEMORY_LIMIT: "32Gi"
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-# build Agent package for remote_updater-x64
-agent_remote_updater-x64-a7:
- extends: .agent_build_common_remote_updater
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-x64",
- "go_deps",
- "generate_minimized_btfs_x64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: amd64
- DESTINATION_OCI: "datadog-agent-7-remote-updater-amd64.tar.xz"
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-# build Agent package for remote_updater-x64
-agent_remote_updater-arm64-a7:
- extends: .agent_build_common_remote_updater
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:arm64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-arm64",
- "go_deps",
- "generate_minimized_btfs_arm64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: arm64
- DESTINATION_OCI: "datadog-agent-7-remote-updater-arm64.tar.xz"
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
diff --git a/.gitlab/package_build/rpm.yml b/.gitlab/package_build/rpm.yml
index fdc5f5ce006b2..67e55282890d1 100644
--- a/.gitlab/package_build/rpm.yml
+++ b/.gitlab/package_build/rpm.yml
@@ -34,7 +34,8 @@
agent_rpm-x64-a6:
extends: .agent_build_common_rpm
rules:
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -53,36 +54,12 @@ agent_rpm-x64-a6:
- source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_6
-# build Agent package for rpm-x64
-agent_rpm-x64-a7:
- extends: .agent_build_common_rpm
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-x64",
- "go_deps",
- "generate_minimized_btfs_x64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: amd64
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
# build Agent package for rpm-arm64
agent_rpm-arm64-a6:
extends: .agent_build_common_rpm
rules:
- - !reference [.on_all_builds_a6]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
tags: ["arch:arm64"]
@@ -100,184 +77,3 @@ agent_rpm-arm64-a6:
before_script:
- source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_6
-
-# build Agent package for rpm-arm64
-agent_rpm-arm64-a7:
- extends: .agent_build_common_rpm
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-arm64",
- "go_deps",
- "generate_minimized_btfs_arm64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: arm64
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-.iot_agent_build_common_rpm:
- script:
- - echo "About to build iot agent for $RELEASE_VERSION_7"
- - source /root/.bashrc
- - !reference [.setup_ruby_mirror_linux]
- - !reference [.setup_python_mirror_linux]
- - !reference [.retrieve_linux_go_deps]
- - echo "Detected host architecture $(uname -m)"
- # $DD_TARGET_ARCH is only set by Arm build images, so assume amd64 if not present
- - echo "Target architecture ${DD_TARGET_ARCH:=amd64}"
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agent.omnibus-build --flavor iot --log-level debug --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - !reference [.upload_sbom_artifacts]
- variables:
- KUBERNETES_CPU_REQUEST: 8
- KUBERNETES_MEMORY_REQUEST: "16Gi"
- KUBERNETES_MEMORY_LIMIT: "16Gi"
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-iot_agent_rpm-x64:
- extends: .iot_agent_build_common_rpm
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "go_deps"]
-
-iot_agent_rpm-arm64:
- extends: .iot_agent_build_common_rpm
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "go_deps"]
-
-iot_agent_rpm-armhf:
- extends: .iot_agent_build_common_rpm
- rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_armhf$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
- # Run with platform:arm64 since no platform:armhf exists and arm64 should be backwards compatible
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "go_deps"]
- before_script:
- # Ensures uname -m reports armv7l
- - export LD_PRELOAD="/usr/local/lib/libfakearmv7l.so"
-
-dogstatsd_rpm-x64:
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_dogstatsd-binary_x64", "go_deps"]
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus
- # from the GOPATH (see above). We then call `invoke` passing --base-dir,
- # pointing to a gitlab-friendly location.
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e dogstatsd.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-agentless_scanner_rpm-x64:
- rules:
- !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_agentless_scanner-binary_x64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus
- # from the GOPATH (see above). We then call `invoke` passing --base-dir,
- # pointing to a gitlab-friendly location.
- - set +x
- - RPM_GPG_KEY=$(aws ssm get-parameter --region us-east-1 --name $RPM_GPG_KEY_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$(aws ssm get-parameter --region us-east-1 --name $RPM_SIGNING_PASSPHRASE_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agentless-scanner.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
-
-agentless_scanner_rpm-arm64:
- rules:
- !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:arm64"]
- needs: ["go_mod_tidy_check", "build_agentless_scanner-binary_arm64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus
- # from the GOPATH (see above). We then call `invoke` passing --base-dir,
- # pointing to a gitlab-friendly location.
- - set +x
- - RPM_GPG_KEY=$(aws ssm get-parameter --region us-east-1 --name $RPM_GPG_KEY_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$(aws ssm get-parameter --region us-east-1 --name $RPM_SIGNING_PASSPHRASE_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agentless-scanner.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR
diff --git a/.gitlab/package_build/suse_rpm.yml b/.gitlab/package_build/suse_rpm.yml
index c0e0a0e7b799a..914ec377820e9 100644
--- a/.gitlab/package_build/suse_rpm.yml
+++ b/.gitlab/package_build/suse_rpm.yml
@@ -37,7 +37,8 @@
agent_suse-x64-a6:
extends: .agent_build_common_suse_rpm
rules:
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/suse_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -56,37 +57,13 @@ agent_suse-x64-a6:
- source /root/.bashrc
- export RELEASE_VERSION=$RELEASE_VERSION_6
-# build Agent package for suse-x64
-agent_suse-x64-a7:
- extends: .agent_build_common_suse_rpm
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/suse_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs:
- [
- "go_mod_tidy_check",
- "build_system-probe-x64",
- "go_deps",
- "generate_minimized_btfs_x64",
- ]
- variables:
- AGENT_MAJOR_VERSION: 7
- PYTHON_RUNTIMES: "3"
- PACKAGE_ARCH: amd64
- before_script:
- - source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
# build Agent package for suse-arm64
# This is a bit hackish and mostly mimics the CentOS7/arm64 build
-agent_suse-arm64-a7:
+agent_suse-arm64-a6:
extends: .agent_build_common_suse_rpm
rules:
- - !reference [.on_all_builds_a7]
- - !reference [.on_packaging_change]
- - !reference [.on_go-version_change]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX:$DATADOG_AGENT_ARMBUILDIMAGES
tags: ["arch:arm64"]
@@ -98,110 +75,10 @@ agent_suse-arm64-a7:
"generate_minimized_btfs_arm64",
]
variables:
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
PYTHON_RUNTIMES: "3"
PACKAGE_ARCH: arm64
OMNIBUS_TASK_EXTRA_PARAMS: "--host-distribution=suse"
before_script:
- source /root/.bashrc
- - export RELEASE_VERSION=$RELEASE_VERSION_7
-
-iot_agent_suse-x64:
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/suse_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "go_deps"]
- before_script:
- - source /root/.bashrc
- script:
- - echo "About to build iot agent for $RELEASE_VERSION_7"
- - !reference [.setup_ruby_mirror_linux]
- - !reference [.setup_python_mirror_linux]
- - !reference [.retrieve_linux_go_deps]
- - echo "Detected host architecture $(uname -m)"
- # $DD_TARGET_ARCH is only set by Arm build images, so assume amd64 if not present
- - echo "Target architecture ${DD_TARGET_ARCH:=amd64}"
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR_SUSE/*
- # Artifacts and cache must live within project directory but we run omnibus in a neutral directory.
- # Thus, we move the artifacts at the end in a gitlab-friendly dir.
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agent.omnibus-build --flavor iot --log-level debug --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- # Copy to a different directory to avoid collisions if a job downloads both the RPM and SUSE RPM artifacts
- - mkdir -p $OMNIBUS_PACKAGE_DIR_SUSE && cp $OMNIBUS_PACKAGE_DIR/* $OMNIBUS_PACKAGE_DIR_SUSE
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR_SUSE
-
-dogstatsd_suse-x64:
- rules:
- - !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/suse_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_dogstatsd-binary_x64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR_SUSE/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus
- # from the GOPATH (see above). We then call `invoke` passing --base-dir,
- # pointing to a gitlab-friendly location.
- - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_GPG_KEY_SSM_NAME)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $RPM_SIGNING_PASSPHRASE_SSM_NAME)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e dogstatsd.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- # Copy to a different directory to avoid collisions if a job downloads both the RPM and SUSE RPM artifacts
- - mkdir -p $OMNIBUS_PACKAGE_DIR_SUSE && cp $OMNIBUS_PACKAGE_DIR/* $OMNIBUS_PACKAGE_DIR_SUSE
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR_SUSE
-
-agentless_scanner_suse-x64:
- rules:
- !reference [.on_a7]
- stage: package_build
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/suse_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs: ["go_mod_tidy_check", "build_agentless_scanner-binary_x64", "go_deps"]
- variables:
- before_script:
- - source /root/.bashrc
- - !reference [.retrieve_linux_go_deps]
- script:
- # remove artifacts from previous pipelines that may come from the cache
- - rm -rf $OMNIBUS_PACKAGE_DIR_SUSE/*
- - !reference [.setup_ruby_mirror_linux]
- # Artifacts and cache must live within project directory but we run omnibus
- # from the GOPATH (see above). We then call `invoke` passing --base-dir,
- # pointing to a gitlab-friendly location.
- - set +x
- - RPM_GPG_KEY=$(aws ssm get-parameter --region us-east-1 --name $RPM_GPG_KEY_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- - printf -- "$RPM_GPG_KEY" | gpg --import --batch
- - export RPM_SIGNING_PASSPHRASE=$(aws ssm get-parameter --region us-east-1 --name $RPM_SIGNING_PASSPHRASE_SSM_NAME --with-decryption --query "Parameter.Value" --out text)
- # Use --skip-deps since the deps are installed by `before_script`.
- - inv -e agentless-scanner.omnibus-build --release-version "$RELEASE_VERSION_7" --major-version 7 --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod"
- - ls -la $OMNIBUS_PACKAGE_DIR
- # Copy to a different directory to avoid collisions if a job downloads both the RPM and SUSE RPM artifacts
- - mkdir -p $OMNIBUS_PACKAGE_DIR_SUSE && cp $OMNIBUS_PACKAGE_DIR/* $OMNIBUS_PACKAGE_DIR_SUSE
- - !reference [.upload_sbom_artifacts]
- artifacts:
- expire_in: 2 weeks
- paths:
- - $OMNIBUS_PACKAGE_DIR_SUSE
+ - export RELEASE_VERSION=$RELEASE_VERSION_6
diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml
index a0e8019824ceb..7c7058b8f2509 100644
--- a/.gitlab/package_build/windows.yml
+++ b/.gitlab/package_build/windows.yml
@@ -56,21 +56,23 @@
variables:
OMNIBUS_TARGET: main
-windows_msi_and_bosh_zip_x64-a7:
+windows_msi_and_bosh_zip_x64-a6:
extends: .windows_main_agent_base
rules:
- - !reference [.on_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
variables:
ARCH: "x64"
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
PYTHON_RUNTIMES: "3"
before_script:
- - set RELEASE_VERSION $RELEASE_VERSION_7
+ - set RELEASE_VERSION $RELEASE_VERSION_6
windows_msi_x64-a6:
extends: .windows_main_agent_base
rules:
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
variables:
ARCH: "x64"
AGENT_MAJOR_VERSION: 6
@@ -80,18 +82,19 @@ windows_msi_x64-a6:
timeout: 3h
# cloudfoundry IoT build for Windows
-windows_zip_agent_binaries_x64-a7:
+windows_zip_agent_binaries_x64-a6:
rules:
- - !reference [.on_a7]
+ - !reference [.except_mergequeue]
+ - when: on_success
stage: package_build
tags: ["runner:windows-docker", "windowsversion:1809"]
needs: ["go_mod_tidy_check", "go_deps"]
variables:
ARCH: "x64"
- AGENT_MAJOR_VERSION: 7
+ AGENT_MAJOR_VERSION: 6
OMNIBUS_TARGET: agent_binaries
before_script:
- - set RELEASE_VERSION $RELEASE_VERSION_7
+ - set RELEASE_VERSION $RELEASE_VERSION_6
script:
- $ErrorActionPreference = "Stop"
- '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"'
diff --git a/.gitlab/packaging/include.yml b/.gitlab/packaging/include.yml
deleted file mode 100644
index 30d6f150c982c..0000000000000
--- a/.gitlab/packaging/include.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-include:
- - /.gitlab/packaging/oci.yml
diff --git a/.gitlab/packaging/oci.yml b/.gitlab/packaging/oci.yml
deleted file mode 100644
index 764c055a30556..0000000000000
--- a/.gitlab/packaging/oci.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-packaging_oci:
- stage: packaging
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- rules: !reference [.on_a7]
- needs:
- [
- "agent_remote_updater-x64-a7",
- "agent_remote_updater-arm64-a7",
- ]
- variables:
- KUBERNETES_CPU_REQUEST: 16
- KUBERNETES_MEMORY_REQUEST: "32Gi"
- KUBERNETES_MEMORY_LIMIT: "32Gi"
- OCI_PRODUCT: "datadog-agent"
- script:
- - python3 -m pip install -r tasks/libs/requirements-github.txt
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh ci.datadog-agent.platform-github-app-key)
- - export GITHUB_APP_ID=682216
- - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1
- - export SIMPLE_VERSION=$(inv agent.version --no-include-git --no-include-pre --major-version 7)
- - set +x
- - export GH_TOKEN=$(inv -e github.get-token-from-app --app-id-env=GITHUB_APP_ID --pkey-env=GITHUB_KEY_B64)
- - git config --global credential.helper '!f() { echo username=x-access-token; echo "password=$GH_TOKEN"; };f'
- - cd /tmp/
- - for i in $(seq 1 5); do git clone --depth=1 https://github.com/DataDog/datadog-packages && break; done
- - cd datadog-packages/cmd/datadog-package
- - go build .
- - OUTPUT_DIR="/tmp/oci_output"
- - |
- for ARCH in "amd64" "arm64"; do
- INPUT_FILE="$OMNIBUS_PACKAGE_DIR/datadog${FLAVOR:+-$FLAVOR}-agent-*${ARCH}.tar.xz"
- OUTPUT_FILE="$(basename -a -s .xz $OMNIBUS_PACKAGE_DIR/*.tar.xz | head -n 1)"
- MERGED_FILE=$(basename -a $OMNIBUS_PACKAGE_DIR/*.tar.xz | head -n 1 | sed "s/-${ARCH}.tar.xz//").oci.tar
- export MERGED_FILE
- INPUT_DIR="/tmp/input_${ARCH}"
- mkdir -p ${INPUT_DIR}
- mkdir -p ${OUTPUT_DIR}
- echo "Generating OCI for $ARCH."
- echo "Extracting to temporary input dir $INPUT_FILE -> $INPUT_DIR"
- tar xJf ${INPUT_FILE} -C ${INPUT_DIR}
- echo "Creating OCI layer -> ${OUTPUT_DIR}/${OUTPUT_FILE}"
- if [ "${OCI_PRODUCT}" = "datadog-agent" ]; then
- EXTRA_FLAGS="--configs ${INPUT_DIR}/etc/datadog-agent"
- fi
- ./datadog-package create \
- --version ${PACKAGE_VERSION} \
- --package datadog-agent \
- --os linux \
- --arch ${ARCH} \
- --archive --archive-path "${OUTPUT_DIR}/${OUTPUT_FILE}" \
- ${EXTRA_FLAGS} \
- ${INPUT_DIR}/${INSTALL_DIR}/
- rm -f ${INPUT_FILE}
- done
- - echo "Aggregating all layers into one package -> ${MERGED_FILE}"
- - ls -l ${OUTPUT_DIR}/
- - ./datadog-package merge ${OUTPUT_DIR}/*.tar
- - mkdir -p ${OMNIBUS_PACKAGE_DIR}
- # We need to propagate the exact version in the pipeline artifact
- - cp merged.tar ${OMNIBUS_PACKAGE_DIR}/${MERGED_FILE}
- # Only the major version is needed in the S3 bucket
- - $S3_CP_CMD merged.tar $S3_ARTIFACTS_URI/datadog-agent_7_oci.tar
- before_script:
- - source /root/.bashrc
- - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1
- - export INSTALL_DIR=/opt/datadog-packages/${OCI_PRODUCT}/${PACKAGE_VERSION}
- artifacts:
- paths:
- - ${OMNIBUS_PACKAGE_DIR}
-
diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml
index 99d0c530ddd63..14619576949f2 100644
--- a/.gitlab/pkg_metrics/pkg_metrics.yml
+++ b/.gitlab/pkg_metrics/pkg_metrics.yml
@@ -33,7 +33,7 @@
send_pkg_size-a6:
allow_failure: true
- rules: !reference [.on_deploy_a6]
+ rules: !reference [.on_deploy]
stage: pkg_metrics
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
@@ -88,104 +88,6 @@ send_pkg_size-a6:
]}" \
"https://api.datadoghq.com/api/v1/series?api_key=$DD_API_KEY"
-send_pkg_size-a7:
- allow_failure: true
- rules: !reference [.on_deploy_a7]
- stage: pkg_metrics
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- needs:
- - agent_deb-x64-a7
- - agent_deb-arm64-a7
- - iot_agent_deb-x64
- - iot_agent_deb-arm64
- - dogstatsd_deb-x64
- - dogstatsd_deb-arm64
- - agent_heroku_deb-x64-a7
- - agent_rpm-arm64-a7
- - agent_rpm-x64-a7
- - iot_agent_rpm-x64
- - iot_agent_rpm-arm64
- - dogstatsd_rpm-x64
- - agent_suse-x64-a7
- - dogstatsd_suse-x64
- - iot_agent_suse-x64
- before_script:
- # FIXME: tmp while we uppdate the base image
- - apt-get install -y wget rpm2cpio cpio
- - ls -l $OMNIBUS_PACKAGE_DIR
- - ls -l $OMNIBUS_PACKAGE_DIR_SUSE
- script:
- - source /root/.bashrc
- - !reference [.add_metric_func, script]
-
- - source /root/.bashrc
- - mkdir -p /tmp/amd64-deb/agent /tmp/amd64-deb/dogstatsd /tmp/amd64-deb/iot-agent /tmp/amd64-deb/heroku-agent
- - mkdir -p /tmp/arm64-deb/agent /tmp/arm64-deb/dogstatsd /tmp/arm64-deb/iot-agent
- - mkdir -p /tmp/amd64-rpm/agent /tmp/amd64-rpm/dogstatsd /tmp/amd64-rpm/iot-agent
- - mkdir -p /tmp/arm64-rpm/agent /tmp/arm64-rpm/iot-agent
- - mkdir -p /tmp/amd64-suse/agent /tmp/amd64-suse/dogstatsd /tmp/amd64-suse/iot-agent
-
- - |
- add_metrics() {
- local base="${1}"
- local os="${2}"
- local arch="${3}"
-
- # record the total uncompressed size of each package
- for package in agent dogstatsd iot-agent heroku-agent; do
- if [ ! -d "${base}/${package}" ]; then continue; fi
- add_metric datadog.agent.package.size $(du -sB1 "${base}/${package}" | sed 's/\([0-9]\+\).\+/\1/') os:${os} package:${package} agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- done
-
- # record the size of each of the important binaries in each package
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/agent/opt/datadog-agent/bin/agent/agent | sed 's/\([0-9]\+\).\+/\1/') bin:agent os:${os} package:agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/agent/opt/datadog-agent/embedded/bin/trace-agent | sed 's/\([0-9]\+\).\+/\1/') bin:trace-agent os:${os} package:agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/agent/opt/datadog-agent/embedded/bin/security-agent | sed 's/\([0-9]\+\).\+/\1/') bin:security-agent os:${os} package:agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/agent/opt/datadog-agent/embedded/bin/process-agent | sed 's/\([0-9]\+\).\+/\1/') bin:process-agent os:${os} package:agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/agent/opt/datadog-agent/embedded/bin/system-probe | sed 's/\([0-9]\+\).\+/\1/') bin:system-probe os:${os} package:agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- if [[ "$arch" == "amd64" || "$os" == "debian" ]]; then add_metric datadog.agent.binary.size $(du -sB1 ${base}/dogstatsd/opt/datadog-dogstatsd/bin/dogstatsd | sed 's/\([0-9]\+\).\+/\1/') bin:dogstatsd os:${os} package:dogstatsd agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}; fi
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/iot-agent/opt/datadog-agent/bin/agent/agent | sed 's/\([0-9]\+\).\+/\1/') bin:agent os:${os} package:iot-agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch}
- if [ -f "${base}/heroku-agent/opt/datadog-agent/bin/agent/agent" ]; then
- add_metric datadog.agent.binary.size $(du -sB1 ${base}/heroku-agent/opt/datadog-agent/bin/agent/agent | sed 's/\([0-9]\+\).\+/\1/') bin:agent os:${os} package:heroku-agent agent:7 bucket_branch:$BUCKET_BRANCH arch:${arch};
- fi
- }
-
- # We silence dpkg and cpio output so we don't exceed gitlab log limit
-
- # debian
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-agent_7*_amd64.deb /tmp/amd64-deb/agent > /dev/null
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-iot-agent_7*_amd64.deb /tmp/amd64-deb/iot-agent > /dev/null
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-dogstatsd_7*_amd64.deb /tmp/amd64-deb/dogstatsd > /dev/null
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-heroku-agent_7*_amd64.deb /tmp/amd64-deb/heroku-agent > /dev/null
- - add_metrics /tmp/amd64-deb debian amd64
-
- # debian arm64
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-agent_7*_arm64.deb /tmp/arm64-deb/agent > /dev/null
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-iot-agent_7*_arm64.deb /tmp/arm64-deb/iot-agent > /dev/null
- - dpkg -x $OMNIBUS_PACKAGE_DIR/datadog-dogstatsd_7*_arm64.deb /tmp/arm64-deb/dogstatsd > /dev/null
- - add_metrics /tmp/arm64-deb debian arm64
-
- # centos
- - cd /tmp/amd64-rpm/agent && rpm2cpio $OMNIBUS_PACKAGE_DIR/datadog-agent-7.*.x86_64.rpm | cpio -idm > /dev/null
- - cd /tmp/amd64-rpm/dogstatsd && rpm2cpio $OMNIBUS_PACKAGE_DIR/datadog-dogstatsd-7.*.x86_64.rpm | cpio -idm > /dev/null
- - cd /tmp/amd64-rpm/iot-agent && rpm2cpio $OMNIBUS_PACKAGE_DIR/datadog-iot-agent-7.*.x86_64.rpm | cpio -idm > /dev/null
- - add_metrics /tmp/amd64-rpm centos amd64
-
- # centos arm64
- - cd /tmp/arm64-rpm/agent && rpm2cpio $OMNIBUS_PACKAGE_DIR/datadog-agent-7.*.aarch64.rpm | cpio -idm > /dev/null
- - cd /tmp/arm64-rpm/iot-agent && rpm2cpio $OMNIBUS_PACKAGE_DIR/datadog-iot-agent-7.*.aarch64.rpm | cpio -idm > /dev/null
- - add_metrics /tmp/arm64-rpm centos arm64
-
- # suse
- - cd /tmp/amd64-suse/agent && rpm2cpio $OMNIBUS_PACKAGE_DIR_SUSE/datadog-agent-7.*.x86_64.rpm | cpio -idm > /dev/null
- - cd /tmp/amd64-suse/dogstatsd && rpm2cpio $OMNIBUS_PACKAGE_DIR_SUSE/datadog-dogstatsd-7.*.x86_64.rpm | cpio -idm > /dev/null
- - cd /tmp/amd64-suse/iot-agent && rpm2cpio $OMNIBUS_PACKAGE_DIR_SUSE/datadog-iot-agent-7.*.x86_64.rpm | cpio -idm > /dev/null
- - add_metrics /tmp/amd64-suse suse amd64
-
- # Send package and binary size metrics
- - !reference [.send_metrics, script]
-
.check_pkg_size:
stage: pkg_metrics
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
@@ -248,7 +150,9 @@ send_pkg_size-a7:
check_pkg_size-amd64-a6:
extends: .check_pkg_size
- rules: !reference [.on_a6]
+ rules:
+ - !reference [.except_mergequeue]
+ - when: on_success
needs:
- agent_deb-x64-a6
- agent_rpm-x64-a6
@@ -267,7 +171,7 @@ check_pkg_size-amd64-a6:
check_pkg_size-arm64-a6:
extends: .check_pkg_size
- rules: !reference [.on_all_builds_a6]
+ rules: !reference [.on_all_builds]
needs:
- agent_deb-arm64-a6
- agent_rpm-arm64-a6
@@ -282,63 +186,3 @@ check_pkg_size-arm64-a6:
declare -Ar max_sizes=(
["datadog-agent"]="140000000"
)
-
-check_pkg_size-amd64-a7:
- extends: .check_pkg_size
- rules: !reference [.on_a7]
- needs:
- - agent_deb-x64-a7
- - iot_agent_deb-x64
- - dogstatsd_deb-x64
- - agent_heroku_deb-x64-a7
- - agent_rpm-x64-a7
- - iot_agent_rpm-x64
- - dogstatsd_rpm-x64
- - agent_suse-x64-a7
- - dogstatsd_suse-x64
- - iot_agent_suse-x64
- - agentless_scanner_deb-x64
- - agentless_scanner_rpm-x64
- - agentless_scanner_suse-x64
- variables:
- MAJOR_VERSION: 7
- FLAVORS: "datadog-agent datadog-iot-agent datadog-dogstatsd datadog-heroku-agent"
- ARCH: "amd64"
- before_script:
- # FIXME: ["datadog-agent"]="140000000" and ["datadog-heroku-agent"]="140000000" should
- # be replaced by "50000000"
- # "70000000" is needed as of now because of multiple large additions in 7.45
- - |
- declare -Ar max_sizes=(
- ["datadog-agent"]="140000000"
- ["datadog-iot-agent"]="10000000"
- ["datadog-dogstatsd"]="10000000"
- ["datadog-heroku-agent"]="70000000"
- ["datadog-agentless-scanner"]="10000000"
- )
-
-check_pkg_size-arm64-a7:
- extends: .check_pkg_size
- rules: !reference [.on_all_builds_a7]
- needs:
- - agent_deb-arm64-a7
- - iot_agent_deb-arm64
- - dogstatsd_deb-arm64
- - agent_rpm-arm64-a7
- - iot_agent_rpm-arm64
- - agentless_scanner_deb-arm64
- - agentless_scanner_rpm-arm64
- variables:
- MAJOR_VERSION: 7
- FLAVORS: "datadog-agent datadog-iot-agent datadog-dogstatsd"
- ARCH: "arm64"
- before_script:
- # FIXME: ["datadog-agent"]="140000000" should be replaced by "70000000"
- # "140000000" is needed as of now because of multiple large additions in 7.45
- - |
- declare -Ar max_sizes=(
- ["datadog-agent"]="140000000"
- ["datadog-iot-agent"]="10000000"
- ["datadog-dogstatsd"]="10000000"
- ["datadog-agentless-scanner"]="10000000"
- )
diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml
index 27fbc51cf5290..0f6266950cb0f 100644
--- a/.gitlab/setup/setup.yml
+++ b/.gitlab/setup/setup.yml
@@ -5,7 +5,7 @@ setup_agent_version:
tags: ["arch:amd64"]
script:
- source /root/.bashrc
- - inv -e agent.version --version-cached
+ - inv -e agent.version -m 6 --version-cached
- $S3_CP_CMD $CI_PROJECT_DIR/agent-version.cache $S3_ARTIFACTS_URI/agent-version.cache
needs: []
diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml
index e31cc06505487..807ad28407f8d 100644
--- a/.gitlab/source_test/include.yml
+++ b/.gitlab/source_test/include.yml
@@ -4,11 +4,10 @@
# security scans & go.mod checks.
include:
- - /.gitlab/source_test/ebpf.yml
- - /.gitlab/source_test/linux.yml
- - /.gitlab/source_test/macos.yml
- - /.gitlab/source_test/windows.yml
- - /.gitlab/source_test/go_generate_check.yml
- - /.gitlab/source_test/slack.yml
- - /.gitlab/source_test/golang_deps_diff.yml
- - /.gitlab/source_test/notify.yml
+ - .gitlab/source_test/ebpf.yml
+ - .gitlab/source_test/linux.yml
+ - .gitlab/source_test/windows.yml
+ - .gitlab/source_test/go_generate_check.yml
+ - .gitlab/source_test/slack.yml
+ - .gitlab/source_test/golang_deps_diff.yml
+ - .gitlab/source_test/notify.yml
diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml
index 47cfa3af1a473..4684816b15079 100644
--- a/.gitlab/source_test/linux.yml
+++ b/.gitlab/source_test/linux.yml
@@ -83,7 +83,8 @@ tests_deb-x64-py2:
- .linux_x64
rules:
- !reference [.except_disable_unit_tests]
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
variables:
PYTHON_RUNTIMES: '2'
CONDA_ENV: ddpy2
@@ -101,39 +102,6 @@ lint_linux-x64:
extends:
- .linux_lint
- .linux_x64
-tests_flavor_iot_deb-x64:
- extends:
- - .rtloader_tests
- - .linux_tests_with_upload
- - .linux_x64
- variables:
- PYTHON_RUNTIMES: '3'
- CONDA_ENV: ddpy3
- FLAVORS: '--flavors iot'
-
-lint_flavor_iot_linux-x64:
- extends:
- - .linux_lint
- - .linux_x64
- variables:
- FLAVORS: '--flavors iot'
-
-tests_flavor_dogstatsd_deb-x64:
- extends:
- - .rtloader_tests
- - .linux_tests_with_upload
- - .linux_x64
- variables:
- PYTHON_RUNTIMES: '3'
- CONDA_ENV: ddpy3
- FLAVORS: '--flavors dogstatsd'
-
-lint_flavor_dogstatsd_linux-x64:
- extends:
- - .linux_lint
- - .linux_x64
- variables:
- FLAVORS: '--flavors dogstatsd'
tests_flavor_heroku_deb-x64:
extends:
@@ -159,8 +127,9 @@ tests_rpm-x64-py2:
extends: .rtloader_tests
rules:
- !reference [.except_disable_unit_tests]
- - !reference [.on_a6]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64_testing$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
+ - !reference [.except_mergequeue]
+ - when: on_success
+ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
variables:
PYTHON_RUNTIMES: '2'
@@ -171,7 +140,7 @@ tests_rpm-x64-py3:
extends:
- .rtloader_tests
- .linux_tests_with_upload
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64_testing$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
+ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:amd64"]
variables:
PYTHON_RUNTIMES: '3'
@@ -182,7 +151,8 @@ tests_deb-arm64-py2:
extends: .rtloader_tests
rules:
- !reference [.except_disable_unit_tests]
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:arm64"]
variables:
@@ -209,7 +179,8 @@ tests_rpm-arm64-py2:
extends: .rtloader_tests
rules:
- !reference [.except_disable_unit_tests]
- - !reference [.on_a6]
+ - !reference [.except_mergequeue]
+ - when: on_success
image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/rpm_arm64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
tags: ["arch:arm64"]
variables:
diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml
deleted file mode 100644
index 9ed09693ca7f9..0000000000000
--- a/.gitlab/source_test/macos.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-tests_macos:
- stage: source_test
- # HACK: Run macOS unit tests only on full pipelines, to limit the use of macOS GitHub runners.
- rules: !reference [.on_main_or_release_branch_or_all_builds]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- variables:
- PYTHON_RUNTIMES: "3"
- script:
- - source /root/.bashrc
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME)
- - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache .
- - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -)
- - !reference [.setup_python_mirror_linux]
- - python3 -m pip install -r tasks/libs/requirements-github.txt
- - FAST_TESTS_FLAG=""
- - if [[ "$FAST_TESTS" = "true" ]]; then FAST_TESTS_FLAG="--fast-tests true"; fi
- - inv -e github.trigger-macos --workflow-type "test" --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" --version-cache "$VERSION_CACHE_CONTENT" $FAST_TESTS_FLAG
- timeout: 6h
- after_script:
- - source /root/.bashrc
- # Upload generated junit files
- - export DATADOG_API_KEY=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh ci.datadog-agent.datadog_api_key_org2)
- - for f in junit-*.tgz; do inv -e junit-upload --tgz-path $f; done
- artifacts:
- expire_in: 2 weeks
- when: always
- paths:
- - test_output.json
- - junit-*-repacked.tgz
- reports:
- junit: "**/junit-out-*.xml"
-
-lint_macos:
- stage: source_test
- rules: !reference [.on_a7]
- image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES
- tags: ["arch:amd64"]
- variables:
- PYTHON_RUNTIMES: "3"
- timeout: 6h
- script:
- - source /root/.bashrc
- - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_KEY_SSM_NAME)
- - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_APP_ID_SSM_NAME)
- - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $MACOS_GITHUB_INSTALLATION_ID_SSM_NAME)
- - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache .
- - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -)
- - !reference [.setup_python_mirror_linux]
- - python3 -m pip install -r tasks/libs/requirements-github.txt
- - inv -e github.trigger-macos --workflow-type "lint" --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" --version-cache "$VERSION_CACHE_CONTENT"
diff --git a/.gitlab/source_test/notify.yml b/.gitlab/source_test/notify.yml
index 1db5134a1f4e3..e6bbf58bc52a8 100644
--- a/.gitlab/source_test/notify.yml
+++ b/.gitlab/source_test/notify.yml
@@ -19,7 +19,5 @@ unit_tests_notify:
- tests_rpm-x64-py3
- tests_rpm-arm64-py3
- tests_windows-x64
- - tests_flavor_iot_deb-x64
- - tests_flavor_dogstatsd_deb-x64
- tests_flavor_heroku_deb-x64
allow_failure: true
diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml
index 11b204e7c7a9c..5400e6eb9d152 100644
--- a/.gitlab/trigger_release/trigger_release.yml
+++ b/.gitlab/trigger_release/trigger_release.yml
@@ -16,10 +16,9 @@
RELEASE_PRODUCT: datadog-agent
TARGET_REPO_BRANCH: $BUCKET_BRANCH
script:
- # agent-release-management creates pipeline for both Agent 6 and Agent 7
- # when triggered with major version 7
+ # agent-release-management creates pipeline for Agent 6
- source /root/.bashrc
- - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1
+ - export RELEASE_VERSION=$(inv agent.version --major-version 6 --url-safe --omnibus-format)-1
- export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/aws_ssm_get_wrapper.sh $GITLAB_SCHEDULER_TOKEN_SSM_NAME)
- 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main"
--variable ACTION
diff --git a/.golangci.yml b/.golangci.yml
index 9f70c7a689bb7..96d0592b40872 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,12 +1,3 @@
-run:
- skip-files:
- - pkg/util/cloudproviders/cloudfoundry/bbscache_test.go # implements interface from imported package whose method names fail linting
- - pkg/util/intern/string.go # TODO: fix govet 'unsafeptr' error
- - pkg/serverless/trace/inferredspan/constants.go # TODO: fox revive exported const error
- skip-dirs:
- - pkg/proto/patches
-
-
issues:
exclude-use-default: false
# Do not limit the number of issues per linter.
@@ -15,9 +6,20 @@ issues:
# Do not limit the number of times a same issue is reported.
max-same-issues: 0
+ exclude-files:
+ - pkg/util/cloudproviders/cloudfoundry/bbscache_test.go # implements interface from imported package whose method names fail linting
+ - pkg/util/intern/string.go # TODO: fix govet 'unsafeptr' error
+ - pkg/serverless/trace/inferredspan/constants.go # TODO: fox revive exported const error
+
+ exclude-dirs:
+ - pkg/proto/patches
+ - tasks/unit_tests/testdata/components_src
+
exclude:
- "Error return value of `io.WriteString` is not checked" # 'errcheck' errors in tools/dep_tree_resolver/go_deps.go
- "Error return value of `pem.Encode` is not checked" # 'errcheck' errors in test/integration/utils/certificates.go
+ - "Error return value of `c.logErrorNotImplemented` is not checked" # 'errcheck' errors in pkg/config/nodetreemodel/config.go
+ - "Error return value of `n.logErrorNotImplemented` is not checked" # 'errcheck' errors in pkg/config/nodetreemodel/config.go
- "exported: exported const Exec should have comment \\(or a comment on this block\\) or be unexported" # 'revive' errors in pkg/process/events/model/model_common.go
- "exported: exported const APIName should have comment \\(or a comment on this block\\) or be unexported" # 'revive' errors in pkg/serverless/trace/inferredspan/constants.go
- "unnecessary conversion" # 'unconvert' errors in test/integration/utils/certificates_test.go
@@ -39,8 +41,6 @@ issues:
# disable typecheck in folder where it breaks because of build tags
- path: "pkg/security/"
linters: [typecheck]
- - path: "pkg/process/"
- linters: [typecheck]
# Ignore name repetition for checks (docker.Docker*, jmx.JMX*, etc.)
- path: pkg/collector/corechecks/
text: "name will be used as .* by other packages, and that stutters"
@@ -61,22 +61,21 @@ issues:
# We are using it and it's not clear how to replace it.
- text: "Temporary has been deprecated since Go 1.18"
linters: [staticcheck]
-
linters:
disable-all: true
enable:
- - unconvert # Remove unnecessary type conversions
- - unused # Checks Go code for unused constants, variables, functions and types
- - ineffassign # Detects when assignments to existing variables are not used
- - misspell # Finds commonly misspelled English words in comments
- - gofmt # Gofmt checks whether code was gofmt-ed
- - revive # Revive is a replacement for golint, a coding style checker
- - errcheck # errcheck is a program for checking for unchecked errors in go programs.
- - staticcheck # staticcheck is a go vet on steroids, applying a ton of static analysis checks
- - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
- - depguard # Depguard is useful for preventing specific packages from being used
- - bodyclose # checks whether HTTP response body is closed successfully
- - gosimple # Linter for Go source code that specializes in simplifying code.
+ - unconvert # Remove unnecessary type conversions
+ - unused # Checks Go code for unused constants, variables, functions and types
+ - ineffassign # Detects when assignments to existing variables are not used
+ - misspell # Finds commonly misspelled English words in comments
+ - gofmt # Gofmt checks whether code was gofmt-ed
+ - revive # Revive is a replacement for golint, a coding style checker
+ - errcheck # errcheck is a program for checking for unchecked errors in go programs.
+ - staticcheck # staticcheck is a go vet on steroids, applying a ton of static analysis checks
+ - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+ - depguard # Depguard is useful for preventing specific packages from being used
+ - bodyclose # checks whether HTTP response body is closed successfully
+ - gosimple # Linter for Go source code that specializes in simplifying code.
linters-settings:
depguard:
@@ -85,8 +84,6 @@ linters-settings:
files:
- $all
deny:
- - pkg: "sync/atomic"
- desc: "Use go.uber.org/atomic instead; see docs/dev/atomics.md"
- pkg: "io/ioutil"
desc: "Deprecated since Go 1.16. Use package io or os instead."
- pkg: "github.com/golang/glog"
@@ -97,11 +94,63 @@ linters-settings:
# the goal is just to limit the risk of accidental imports
- pkg: "gotest.tools/assert"
desc: "Not really forbidden to use, but it is usually imported by mistake instead of github.com/stretchr/testify/assert"
+ - pkg: "github.com/tj/assert"
+ desc: "Not really forbidden to use, but it is usually imported by mistake instead of github.com/stretchr/testify/assert, and confusing since it actually has the behavior of github.com/stretchr/testify/require"
errcheck:
- # Disable warnings for `fmt`, `log` and `seelog` packages. Also ignore `Write` functions from `net/http` package.
- # Disable warnings for select Windows functions
- ignore: fmt:.*,github.com/DataDog/datadog-agent/pkg/util/log:.*,github.com/DataDog/datadog-agent/comp/core/log:.*,github.com/cihub/seelog:.*,net/http:Write,github.com/DataDog/datadog-agent/pkg/trace/metrics:.*,github.com/DataDog/datadog-agent/pkg/collector/corechecks:Warnf?,golang.org/x/sys/windows:(CloseHandle|FreeLibrary|FreeSid|RegCloseKey|SetEvent|LocalFree),syscall:CloseHandle,golang.org/x/sys/windows/svc/mgr:Disconnect,golang.org/x/sys/windows/svc/debug:(Close|Error|Info|Warning),github.com/lxn/walk:Dispose,github.com/DataDog/datadog-agent/comp/core/flare/types:(AddFile.*|CopyDir.*|CopyFile.*),golang.org/x/sys/windows/registry:Close
+ ignore: "github.com/DataDog/datadog-agent/pkg/util/log:.*"
+ exclude-functions:
+ - (*github.com/DataDog/datadog-agent/pkg/collector/corechecks.CheckBase).Warn
+ - (*github.com/DataDog/datadog-agent/pkg/collector/corechecks.CheckBase).Warnf
+ - (*github.com/lxn/walk.NotifyIcon).Dispose
+ - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).AddFile
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).AddFileFromFunc
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).AddFileWithoutScrubbing
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).CopyDir
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).CopyDirTo
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).CopyDirToWithoutScrubbing
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).CopyFile
+ - (github.com/DataDog/datadog-agent/comp/core/flare/types.FlareBuilder).CopyFileTo
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).ChangeLogLevel
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Critical
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Criticalf
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Error
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Errorf
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Warn
+ - (github.com/DataDog/datadog-agent/comp/core/log.Component).Warnf
+ - (net/http.ResponseWriter).Write
+ - fmt.Sscanf
+ - github.com/cihub/seelog.Warnf
+ - github.com/DataDog/datadog-agent/pkg/util/log.ChangeLogLevel
+ - github.com/DataDog/datadog-agent/pkg/util/log.Critical
+ - github.com/DataDog/datadog-agent/pkg/util/log.Criticalc
+ - github.com/DataDog/datadog-agent/pkg/util/log.Criticalf
+ - github.com/DataDog/datadog-agent/pkg/util/log.CriticalStackDepth
+ - github.com/DataDog/datadog-agent/pkg/util/log.Error
+ - github.com/DataDog/datadog-agent/pkg/util/log.Errorc
+ - github.com/DataDog/datadog-agent/pkg/util/log.Errorf
+ - github.com/DataDog/datadog-agent/pkg/util/log.ErrorStackDepth
+ - github.com/DataDog/datadog-agent/pkg/util/log.JMXError
+ - github.com/DataDog/datadog-agent/pkg/util/log.logContextWithError
+ - github.com/DataDog/datadog-agent/pkg/util/log.logFormatWithError
+ - github.com/DataDog/datadog-agent/pkg/util/log.Warn
+ - github.com/DataDog/datadog-agent/pkg/util/log.Warnc
+ - github.com/DataDog/datadog-agent/pkg/util/log.Warnf
+ - github.com/DataDog/datadog-agent/pkg/util/log.WarnStackDepth
+ - golang.org/x/sys/windows.CloseHandle
+ - golang.org/x/sys/windows.FreeLibrary
+ - golang.org/x/sys/windows.FreeSid
+ - golang.org/x/sys/windows.LocalFree
+ - golang.org/x/sys/windows.RegCloseKey
+ - golang.org/x/sys/windows.SetEvent
+ - golang.org/x/sys/windows/registry.Close
+ - golang.org/x/sys/windows/svc/debug.Close
+ - golang.org/x/sys/windows/svc/debug.Error
+ - golang.org/x/sys/windows/svc/debug.Info
+ - golang.org/x/sys/windows/svc/debug.Warning
+ - pkg/util/log.JMXError
+ - syscall.CloseHandle
staticcheck:
checks: ["all",
"-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", # These ones are disabled by default on staticcheck
@@ -109,7 +158,8 @@ linters-settings:
# Actual issues that should be fixed eventually
"-SA6002", # TODO: Fix sync.Pools
"-SA4025", # TODO: Fix trace unit test
- "-SA4011", "-SA4031" # Disabling these to re-enable golanci-lint default tests
+ "-SA4011", "-SA4031", # Disabling these to re-enable golanci-lint default tests
+ "-SA4023" # Fix the lint_macos_gitlab_amd64 linter discrepancy while we find the issue (see https://github.com/dominikh/go-tools/issues/847)
]
govet:
settings:
@@ -124,11 +174,11 @@ linters-settings:
revive:
# in order to change revive config, the default configuration must be explicitly defined
# https://github.com/mgechev/revive/blob/master/defaults.toml
- ignoreGeneratedHeader: false
+ #
+ # see https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml for the golangci-lint config syntax
+ ignore-generated-header: false
severity: "warning"
confidence: 0.8
- errorCode: 0
- warningCode: 0
rules:
- name: blank-imports
- name: context-as-argument
@@ -150,7 +200,9 @@ linters-settings:
- name: time-naming
- name: unexported-return
- name: unreachable-code
- - name: unused-parameter
+ # - name: unused-parameter
+ # arguments:
+ # - allowRegex: "^_"
- name: var-declaration
- name: var-naming
# non-default rules:
diff --git a/cmd/cluster-agent/api/v1/languagedetection/util.go b/cmd/cluster-agent/api/v1/languagedetection/util.go
index 904d0366ca249..eb2442558c5f4 100644
--- a/cmd/cluster-agent/api/v1/languagedetection/util.go
+++ b/cmd/cluster-agent/api/v1/languagedetection/util.go
@@ -171,9 +171,9 @@ func (ownersLanguages *OwnersLanguages) handleKubeAPIServerUnsetEvents(events []
case workloadmeta.KindKubernetesDeployment:
// extract deployment name and namespace from entity id
deployment := event.Entity.(*workloadmeta.KubernetesDeployment)
- deploymentIds := strings.Split(deployment.GetID().ID, "/")
- namespace := deploymentIds[0]
- deploymentName := deploymentIds[1]
+ deploymentIDs := strings.Split(deployment.GetID().ID, "/")
+ namespace := deploymentIDs[0]
+ deploymentName := deploymentIDs[1]
delete(ownersLanguages.containersLanguages, langUtil.NewNamespacedOwnerReference("apps/v1", langUtil.KindDeployment, deploymentName, namespace))
_ = wlm.Push(workloadmeta.SourceLanguageDetectionServer, workloadmeta.Event{
Type: workloadmeta.EventTypeUnset,
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 12c5fb5f7ee05..11574850fca41 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -5,7 +5,7 @@ go 1.21.9
require (
github.com/frapposelli/wwhrd v0.4.0
github.com/go-enry/go-license-detector/v4 v4.3.0
- github.com/golangci/golangci-lint v1.54.2
+ github.com/golangci/golangci-lint v1.59.1
github.com/goware/modvendor v0.5.0
github.com/stormcat24/protodep v0.1.8
github.com/vektra/mockery/v2 v2.40.1
@@ -18,19 +18,22 @@ require (
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- github.com/4meepo/tagalign v1.3.2 // indirect
- github.com/Abirdcfly/dupword v0.0.12 // indirect
- github.com/Antonboom/errname v0.1.12 // indirect
- github.com/Antonboom/nilnil v0.1.7 // indirect
- github.com/BurntSushi/toml v1.3.2 // indirect
+ github.com/4meepo/tagalign v1.3.4 // indirect
+ github.com/Abirdcfly/dupword v0.0.14 // indirect
+ github.com/Antonboom/errname v0.1.13 // indirect
+ github.com/Antonboom/nilnil v0.1.9 // indirect
+ github.com/Antonboom/testifylint v1.3.1 // indirect
+ github.com/BurntSushi/toml v1.4.0 // indirect
+ github.com/Crocmagnon/fatcontext v0.2.2 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
- github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect
+ github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
- github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
+ github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
- github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
+ github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
+ github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
github.com/ashanbrown/forbidigo v1.6.0 // indirect
@@ -39,71 +42,70 @@ require (
github.com/bitfield/gotestdox v0.2.1 // indirect
github.com/bkielbasa/cyclop v1.2.1 // indirect
github.com/blizzy78/varnamelen v0.8.0 // indirect
- github.com/bombsimon/wsl/v3 v3.4.0 // indirect
- github.com/breml/bidichk v0.2.4 // indirect
- github.com/breml/errchkjson v0.3.1 // indirect
+ github.com/bombsimon/wsl/v4 v4.2.1 // indirect
+ github.com/breml/bidichk v0.2.7 // indirect
+ github.com/breml/errchkjson v0.3.6 // indirect
github.com/briandowns/spinner v1.23.0 // indirect
- github.com/butuzov/ireturn v0.2.0 // indirect
- github.com/butuzov/mirror v1.1.0 // indirect
- github.com/ccojocar/zxcvbn-go v1.0.1 // indirect
+ github.com/butuzov/ireturn v0.3.0 // indirect
+ github.com/butuzov/mirror v1.2.0 // indirect
+ github.com/catenacyber/perfsprint v0.7.1 // indirect
+ github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/charithe/durationcheck v0.0.10 // indirect
- github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect
+ github.com/chavacava/garif v0.1.0 // indirect
github.com/chigopher/pathlib v0.19.1 // indirect
+ github.com/ckaznocha/intrange v0.1.2 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
- github.com/daixiang0/gci v0.11.0 // indirect
+ github.com/daixiang0/gci v0.13.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/denis-tingaikin/go-header v0.4.3 // indirect
+ github.com/denis-tingaikin/go-header v0.5.0 // indirect
github.com/dgryski/go-minhash v0.0.0-20170608043002-7fe510aff544 // indirect
github.com/dnephin/pflag v1.0.7 // indirect
github.com/ekzhu/minhash-lsh v0.0.0-20171225071031-5c06ee8586a1 // indirect
github.com/emicklei/dot v0.15.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
- github.com/esimonov/ifshort v1.0.4 // indirect
- github.com/ettle/strcase v0.1.1 // indirect
- github.com/fatih/color v1.15.0 // indirect
+ github.com/ettle/strcase v0.2.0 // indirect
+ github.com/fatih/color v1.17.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
- github.com/firefart/nonamedreturns v1.0.4 // indirect
+ github.com/firefart/nonamedreturns v1.0.5 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
- github.com/go-critic/go-critic v0.9.0 // indirect
+ github.com/ghostiam/protogetter v0.3.6 // indirect
+ github.com/go-critic/go-critic v0.11.4 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.4.1 // indirect
github.com/go-git/go-git/v5 v5.7.0 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
- github.com/go-toolsmith/astequal v1.1.0 // indirect
+ github.com/go-toolsmith/astequal v1.2.0 // indirect
github.com/go-toolsmith/astfmt v1.1.0 // indirect
github.com/go-toolsmith/astp v1.1.0 // indirect
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
- github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
- github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
- github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
- github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
- github.com/golangci/misspell v0.4.1 // indirect
- github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
- github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
- github.com/google/go-cmp v0.5.9 // indirect
+ github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect
+ github.com/golangci/misspell v0.6.0 // indirect
+ github.com/golangci/modinfo v0.3.4 // indirect
+ github.com/golangci/plugin-module-register v0.1.1 // indirect
+ github.com/golangci/revgrep v0.5.3 // indirect
+ github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
github.com/google/licensecheck v0.3.1 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
- github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect
+ github.com/gordonklaus/ineffassign v0.1.0 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
- github.com/hashicorp/errwrap v1.0.0 // indirect
- github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/hhatto/gorst v0.0.0-20181029133204-ca9f730cac5b // indirect
@@ -114,74 +116,77 @@ require (
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jdkato/prose v1.1.0 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
- github.com/jgautheron/goconst v1.5.1 // indirect
+ github.com/jgautheron/goconst v1.7.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
+ github.com/jjti/go-spancheck v0.6.1 // indirect
github.com/julz/importas v0.1.0 // indirect
+ github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
- github.com/kisielk/errcheck v1.6.3 // indirect
- github.com/kisielk/gotool v1.0.0 // indirect
- github.com/kkHAIKE/contextcheck v1.1.4 // indirect
+ github.com/kisielk/errcheck v1.7.0 // indirect
+ github.com/kkHAIKE/contextcheck v1.1.5 // indirect
github.com/kulti/thelper v0.6.3 // indirect
- github.com/kunwardeep/paralleltest v1.0.8 // indirect
+ github.com/kunwardeep/paralleltest v1.0.10 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
- github.com/ldez/gomoddirectives v0.2.3 // indirect
+ github.com/lasiar/canonicalheader v1.1.1 // indirect
+ github.com/ldez/gomoddirectives v0.2.4 // indirect
github.com/ldez/tagliatelle v0.5.0 // indirect
- github.com/leonklingele/grouper v1.1.1 // indirect
+ github.com/leonklingele/grouper v1.1.2 // indirect
github.com/lufeee/execinquery v1.2.1 // indirect
+ github.com/macabu/inamedparam v0.1.3 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/maratori/testableexamples v1.0.0 // indirect
github.com/maratori/testpackage v1.1.1 // indirect
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.19 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/mbilski/exhaustivestruct v1.2.0 // indirect
- github.com/mgechev/revive v1.3.2 // indirect
+ github.com/mgechev/revive v1.3.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb // indirect
github.com/moricho/tparallel v0.3.1 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
- github.com/nishanths/exhaustive v0.11.0 // indirect
+ github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
- github.com/nunnatsa/ginkgolinter v0.13.5 // indirect
+ github.com/nunnatsa/ginkgolinter v0.16.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
- github.com/pelletier/go-toml/v2 v2.0.6 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v1.4.4 // indirect
+ github.com/polyfloyd/go-errorlint v1.5.2 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
- github.com/quasilyte/go-ruleguard v0.4.0 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.2 // indirect
+ github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/rs/zerolog v1.29.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/ryancurrah/gomodguard v1.3.0 // indirect
- github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect
+ github.com/ryancurrah/gomodguard v1.3.2 // indirect
+ github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
+ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
- github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect
- github.com/securego/gosec/v2 v2.17.0 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.26.0 // indirect
+ github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
github.com/shogo82148/go-shuffle v0.0.0-20170808115208-59829097ff3b // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sivchari/containedctx v1.0.3 // indirect
- github.com/sivchari/nosnakecase v1.7.0 // indirect
github.com/sivchari/tenv v1.7.1 // indirect
github.com/skeema/knownhosts v1.1.1 // indirect
github.com/sonatard/noctx v0.0.2 // indirect
github.com/sourcegraph/go-diff v0.7.0 // indirect
- github.com/spf13/afero v1.9.3 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -194,44 +199,44 @@ require (
github.com/subosito/gotenv v1.4.2 // indirect
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
github.com/tdakkota/asciicheck v0.2.0 // indirect
- github.com/tetafro/godot v1.4.14 // indirect
+ github.com/tetafro/godot v1.4.16 // indirect
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
github.com/timonwong/loggercheck v0.9.4 // indirect
- github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
github.com/ultraware/funlen v0.1.0 // indirect
- github.com/ultraware/whitespace v0.0.5 // indirect
- github.com/uudashr/gocognit v1.0.7 // indirect
+ github.com/ultraware/whitespace v0.1.1 // indirect
+ github.com/uudashr/gocognit v1.1.2 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
- github.com/xen0n/gosmopolitan v1.2.1 // indirect
+ github.com/xen0n/gosmopolitan v1.2.2 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
- github.com/yeya24/promlinter v0.2.0 // indirect
- github.com/ykadowak/zerologlint v0.1.3 // indirect
- gitlab.com/bosi/decorder v0.4.0 // indirect
- go.tmz.dev/musttag v0.7.2 // indirect
+ github.com/yeya24/promlinter v0.3.0 // indirect
+ github.com/ykadowak/zerologlint v0.1.5 // indirect
+ gitlab.com/bosi/decorder v0.4.2 // indirect
+ go-simpler.org/musttag v0.12.2 // indirect
+ go-simpler.org/sloglint v0.7.1 // indirect
go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/crypto v0.12.0 // indirect
- golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
- golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
- golang.org/x/mod v0.12.0 // indirect
- golang.org/x/net v0.14.0 // indirect
- golang.org/x/sync v0.3.0 // indirect
- golang.org/x/sys v0.11.0 // indirect
- golang.org/x/term v0.11.0 // indirect
- golang.org/x/text v0.12.0 // indirect
- golang.org/x/tools v0.12.0 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
+ golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/term v0.21.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
gonum.org/v1/gonum v0.7.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/neurosnap/sentences.v1 v1.0.6 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- honnef.co/go/tools v0.4.5 // indirect
- mvdan.cc/gofumpt v0.5.0 // indirect
- mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
- mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
- mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect
+ honnef.co/go/tools v0.4.7 // indirect
+ mvdan.cc/gofumpt v0.6.0 // indirect
+ mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
)
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index 367d2e2c926c9..8b056988fb07d 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -8,7 +8,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -19,9 +18,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -39,32 +35,35 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI=
-github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
-github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc=
-github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI=
-github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY=
-github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro=
-github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow=
-github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ=
+github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
+github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
+github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8=
+github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI=
+github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM=
+github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns=
+github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ=
+github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ=
+github.com/Antonboom/testifylint v1.3.1 h1:Uam4q1Q+2b6H7gvk9RQFw6jyVDdpzIirFOOrbs14eG4=
+github.com/Antonboom/testifylint v1.3.1/go.mod h1:NV0hTlteCkViPW9mSR4wEMfwp+Hs1T3dY60bkvSfhpM=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
-github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Crocmagnon/fatcontext v0.2.2 h1:OrFlsDdOj9hW/oBEJBNSuH7QWf+E9WPVHw+x52bXVbk=
+github.com/Crocmagnon/fatcontext v0.2.2/go.mod h1:WSn/c/+MMNiD8Pri0ahRj0o9jVpeowzavOQplBJw6u0=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA=
-github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
-github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA=
+github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ=
github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1 h1:JMDGhoQvXNTqH6Y3MC0IUw6tcZvaUdujNqzK2HYWZc8=
github.com/ProtonMail/go-crypto v0.0.0-20230528122434-6f98819771a1/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes=
@@ -73,13 +72,19 @@ github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk=
+github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c=
+github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
+github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE=
-github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ=
+github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg=
+github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU=
github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
@@ -105,52 +110,54 @@ github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ
github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM=
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
-github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
-github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
-github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
-github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
-github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
+github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM=
+github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY=
+github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ=
+github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA=
+github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U=
github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
-github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4=
-github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
-github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
-github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
+github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
+github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
+github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
+github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4=
-github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
+github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
+github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
+github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
+github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A=
github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI=
+github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
-github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw=
+github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
-github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
+github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8=
+github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/dgryski/go-minhash v0.0.0-20170608043002-7fe510aff544 h1:54Y/2GF52MSJ4n63HWvNDFRtztgm6tq2UrOX61sjGKc=
@@ -171,23 +178,20 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
-github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
-github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
-github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
+github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
-github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
+github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
+github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
-github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/frapposelli/wwhrd v0.4.0 h1:Vn4hjT/tHNeOnTxFBO0ys1NBH8/Inxqqi1Q0eJmCImo=
github.com/frapposelli/wwhrd v0.4.0/go.mod h1:Bzwvr3hY1yoBsBbIMkckeHUI6jf1cLRueaaMxZ3N9FY=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
@@ -195,11 +199,13 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk=
+github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
-github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U=
-github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40=
+github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU=
+github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc=
github.com/go-enry/go-license-detector/v4 v4.3.0 h1:OFlQAVNw5FlKUjX4OuW8JOabu8MQHjTKDb9pdeNYMUw=
github.com/go-enry/go-license-detector/v4 v4.3.0/go.mod h1:HaM4wdNxSlz/9Gw0uVOKSQS5JVFqf2Pk8xUPEn6bldI=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
@@ -223,19 +229,20 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s=
github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
-github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw=
github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
+github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw=
+github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY=
github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco=
github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
@@ -247,6 +254,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
+github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc=
+github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -286,28 +295,25 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.54.2 h1:oR9zxfWYxt7hFqk6+fw6Enr+E7F0SN2nqHhJYyIb0yo=
-github.com/golangci/golangci-lint v1.54.2/go.mod h1:vnsaCTPKCI2wreL9tv7RkHDwUrz3htLjed6+6UsvcwU=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
-github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
+github.com/golangci/golangci-lint v1.59.1 h1:CRRLu1JbhK5avLABFJ/OHVSQ0Ie5c4ulsOId1h3TTks=
+github.com/golangci/golangci-lint v1.59.1/go.mod h1:jX5Oif4C7P0j9++YB2MMJmoNrb01NJ8ITqKWNLewThg=
+github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
+github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
+github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
+github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
+github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
+github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
+github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
+github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
@@ -327,14 +333,14 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs=
github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -342,21 +348,16 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8=
-github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
+github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
+github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
@@ -371,13 +372,9 @@ github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoIS
github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
github.com/goware/modvendor v0.5.0 h1:3XXkmWdTccMzBswM5FTTXvWEtCV7DP7VRkIACRCGaqU=
github.com/goware/modvendor v0.5.0/go.mod h1:rtogeSlPLJT6MlypJyGp24o/vnHvF+ebCoTQrDX6oGY=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@@ -391,7 +388,6 @@ github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -404,14 +400,16 @@ github.com/jdkato/prose v1.1.0/go.mod h1:jkF0lkxaX5PFSlk9l4Gh9Y+T57TqUZziWT7uZbW
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
-github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
+github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jjti/go-spancheck v0.6.1 h1:ZK/wE5Kyi1VX3PJpUO2oEgeoI4FWOUm7Shb2Gbv5obI=
+github.com/jjti/go-spancheck v0.6.1/go.mod h1:vF1QkOO159prdo6mHRxak2CpzDpHAfKiPUDP/NeRnX8=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -425,18 +423,18 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
+github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
-github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0=
+github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
-github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
+github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg=
+github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -448,18 +446,22 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558=
-github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
+github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs=
+github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
-github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
-github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
+github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I=
+github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0=
+github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
+github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
-github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
+github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
+github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
+github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
@@ -477,8 +479,9 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
@@ -486,10 +489,8 @@ github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7 h1:6HgbBMgs3hI9y1
github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
-github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U=
-github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0=
+github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE=
+github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -510,37 +511,39 @@ github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4N
github.com/neurosnap/sentences v1.0.6 h1:iBVUivNtlwGkYsJblWV8GGVFmXzZzak907Ci8aA0VTE=
github.com/neurosnap/sentences v1.0.6/go.mod h1:pg1IapvYpWCJJm/Etxeh0+gtMf1rI1STY9S7eUCPbDc=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0=
-github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
+github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg=
+github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.13.5 h1:fOsPB4CEZOPkyMqF4B9hoqOpooFWU7vWSVkCSscVpgU=
-github.com/nunnatsa/ginkgolinter v0.13.5/go.mod h1:OBHy4536xtuX3102NM63XRtOyxqZOO02chsaeDWXVO8=
+github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk=
+github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
-github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
-github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
+github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
+github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
-github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.4.4 h1:A9gytp+p6TYqeALTYRoxJESYP8wJRETRX2xzGWFsEBU=
-github.com/polyfloyd/go-errorlint v1.4.4/go.mod h1:ry5NqF7l9Q77V+XqAfUg1zfryrEtyac3G5+WVpIK0xU=
+github.com/polyfloyd/go-errorlint v1.5.2 h1:SJhVik3Umsjh7mte1vE0fVZ5T1gznasQG3PV7U5xFdA=
+github.com/polyfloyd/go-errorlint v1.5.2/go.mod h1:sH1QC1pxxi0fFecsVIzBmxtrgd9IF/SkJpA6wqyKAJs=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@@ -563,8 +566,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
-github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
+github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs=
+github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
@@ -572,26 +577,29 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:r
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
-github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
-github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
-github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
+github.com/ryancurrah/gomodguard v1.3.2 h1:CuG27ulzEB1Gu5Dk5gP8PFxSOZ3ptSdP5iI/3IXxM18=
+github.com/ryancurrah/gomodguard v1.3.2/go.mod h1:LqdemiFomEjcxOqirbQCb3JFvSxH2JUYMerTFd3sF2o=
+github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
+github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc=
-github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE=
-github.com/securego/gosec/v2 v2.17.0 h1:ZpAStTDKY39insEG9OH6kV3IkhQZPTq9a9eGOLOjcdI=
-github.com/securego/gosec/v2 v2.17.0/go.mod h1:lt+mgC91VSmriVoJLentrMkRCYs+HLTBnUFUBuhV2hc=
+github.com/sashamelentyev/usestdlibvars v1.26.0 h1:LONR2hNVKxRmzIrZR0PhSF3mhCAzvnr+DcUiHgREfXE=
+github.com/sashamelentyev/usestdlibvars v1.26.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
+github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 h1:rnO6Zp1YMQwv8AyxzuwsVohljJgp4L0ZqiCgtACsPsc=
+github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9/go.mod h1:dg7lPlu/xK/Ut9SedURCoZbVCR4yC7fM65DtH9/CDHs=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
@@ -610,8 +618,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
-github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8=
-github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
github.com/skeema/knownhosts v1.1.1 h1:MTk78x9FPgDFVFkDLTrsnnfCJl7g1C/nnKvePgrIngE=
@@ -620,8 +626,8 @@ github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00=
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
-github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
-github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
@@ -654,8 +660,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
@@ -668,22 +673,22 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.14 h1:ScO641OHpf9UpHPk8fCknSuXNMpi4iFlwuWoBs3L+1s=
-github.com/tetafro/godot v1.4.14/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0=
+github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
-github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ=
-github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
+github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs=
+github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
-github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
-github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
-github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ=
+github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
+github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
+github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
github.com/vektra/mockery/v2 v2.40.1 h1:8D01rBqloDLDHKZGXkyUD9Yj5Z+oDXBqDZ+tRXYM/oA=
github.com/vektra/mockery/v2 v2.40.1/go.mod h1:dPzGtjT0/Uu4hqpF6QNHwz+GLago7lq1bxdj9wHbGKo=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0=
@@ -691,14 +696,14 @@ github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
-github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
+github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
+github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
-github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
-github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
-github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE=
-github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
+github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs=
+github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4=
+github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw=
+github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -706,21 +711,24 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
-gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
-go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E=
-go-simpler.org/assert v0.6.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
+gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo=
+gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8=
+go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ=
+go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
+go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs=
+go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM=
+go-simpler.org/sloglint v0.7.1 h1:qlGLiqHbN5islOxjeLXoPtUdZXb669RW+BDQ+xOSNoU=
+go-simpler.org/sloglint v0.7.1/go.mod h1:OlaVDRh/FKKd4X4sIMbsz8st97vomydceL146Fthh/c=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s=
-go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
@@ -735,16 +743,14 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -759,12 +765,12 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4=
-golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ=
-golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
+golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -778,7 +784,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f h1:kgfVkAEEQXXQ0qc6dH7n6y37NAYmTFmz0YRwrRjgxKw=
@@ -790,7 +795,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
@@ -800,8 +804,9 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -831,9 +836,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -847,18 +849,14 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/perf v0.0.0-20210220033136-40a54f11e909 h1:rWw0Gj4DMl/2otJ8CnfTcwOWkpROAc6qhXXoMrYOCgo=
golang.org/x/perf v0.0.0-20210220033136-40a54f11e909/go.mod h1:KRSrLY7jerMEa0Ih7gBheQ3FYDiSx6liMnniX1o3j2g=
@@ -875,8 +873,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -910,18 +909,13 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -945,8 +939,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -956,14 +950,13 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
@@ -972,8 +965,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1027,14 +1020,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
@@ -1049,8 +1035,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
-golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
-golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1078,16 +1064,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1117,13 +1099,6 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1137,10 +1112,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1153,8 +1124,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1191,16 +1162,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.4.5 h1:YGD4H+SuIOOqsyoLOpZDWcieM28W47/zRO7f+9V3nvo=
-honnef.co/go/tools v0.4.5/go.mod h1:GUV+uIBCLpdf0/v6UhHHG/yzI/z6qPskBeQCjcNB96k=
-mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
-mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is=
+honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs=
+honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
+mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo=
+mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U=
+mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/omnibus/config/software/datadog-agent-integrations-py3.rb b/omnibus/config/software/datadog-agent-integrations-py3.rb
index af9d70bb1f668..ce1939d947a55 100644
--- a/omnibus/config/software/datadog-agent-integrations-py3.rb
+++ b/omnibus/config/software/datadog-agent-integrations-py3.rb
@@ -120,7 +120,7 @@
"CFLAGS" => "-I#{install_dir}/embedded/include -I/opt/mqm/inc",
"CXXFLAGS" => "-I#{install_dir}/embedded/include -I/opt/mqm/inc",
"LDFLAGS" => "-L#{install_dir}/embedded/lib -L/opt/mqm/lib64 -L/opt/mqm/lib",
- "LD_RUN_PATH" => "#{install_dir}/embedded/lib -L/opt/mqm/lib64 -L/opt/mqm/lib",
+ "LD_RUN_PATH" => "#{install_dir}/embedded/lib",
"PATH" => "#{install_dir}/embedded/bin:#{ENV['PATH']}",
}
diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go
index 778d69a385f8f..0db562ef67609 100644
--- a/pkg/clusteragent/languagedetection/patcher.go
+++ b/pkg/clusteragent/languagedetection/patcher.go
@@ -179,9 +179,9 @@ func (lp *languagePatcher) handleDeploymentEvent(event workloadmeta.Event) {
deploymentID := event.Entity.(*workloadmeta.KubernetesDeployment).ID
// extract deployment name and namespace from entity id
- deploymentIds := strings.Split(deploymentID, "/")
- namespace := deploymentIds[0]
- deploymentName := deploymentIds[1]
+ deploymentIDs := strings.Split(deploymentID, "/")
+ namespace := deploymentIDs[0]
+ deploymentName := deploymentIDs[1]
// get the complete entity
deployment, err := lp.store.GetKubernetesDeployment(deploymentID)
diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go
index 26873926fe0b5..2dccb7f0b2a7f 100644
--- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go
+++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/prog.go
@@ -67,8 +67,8 @@ type ProgInfo struct {
XlatedProgInsns Pointer
LoadTime uint64
CreatedByUID uint32
- NrMapIds uint32
- MapIds Pointer
+ NrMapIDs uint32
+ MapIDs Pointer
Name ObjName
Ifindex uint32
_ [4]byte /* unsupported bitfield */
diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
index ce9fbfe8d65e8..86921a64c3b07 100644
--- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
+++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go
@@ -708,7 +708,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
},
},
"profile2": profile.ProfileConfig{
@@ -716,7 +716,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.10"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.10"},
},
},
"profile3": profile.ProfileConfig{
@@ -724,7 +724,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.5.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.5.*"},
},
},
}
@@ -734,7 +734,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.***.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.***.*"},
},
},
}
@@ -744,7 +744,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.[.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.[.*"},
},
},
}
@@ -754,7 +754,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
},
"profile2": profile.ProfileConfig{
@@ -762,7 +762,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
},
"profile3": profile.ProfileConfig{
@@ -770,7 +770,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.4"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.4"},
},
},
}
@@ -780,7 +780,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "userMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
IsUserProfile: true,
},
@@ -789,7 +789,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "defaultMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
},
}
@@ -799,7 +799,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "defaultMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
},
"user-profile": profile.ProfileConfig{
@@ -807,7 +807,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "userMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
IsUserProfile: true,
},
@@ -818,7 +818,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "defaultMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.*"},
},
},
"user-profile": profile.ProfileConfig{
@@ -826,7 +826,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "userMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.*"},
},
IsUserProfile: true,
},
@@ -837,7 +837,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
IsUserProfile: true,
},
@@ -846,7 +846,7 @@ func Test_getProfileForSysObjectID(t *testing.T) {
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.2.3.4.5", Name: "someMetric"}},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3"},
},
IsUserProfile: true,
},
@@ -1120,7 +1120,7 @@ func Test_snmpConfig_setProfile(t *testing.T) {
},
},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
}
profile2 := profiledefinition.ProfileDefinition{
Device: profiledefinition.DeviceMeta{Vendor: "b-vendor"},
@@ -1171,7 +1171,7 @@ func Test_snmpConfig_setProfile(t *testing.T) {
},
},
},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
}
mockProfiles := profile.ProfileConfigMap{
diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile.go b/pkg/collector/corechecks/snmp/internal/profile/profile.go
index 031d9f945c3f8..2640b177d0b97 100644
--- a/pkg/collector/corechecks/snmp/internal/profile/profile.go
+++ b/pkg/collector/corechecks/snmp/internal/profile/profile.go
@@ -55,7 +55,7 @@ func GetProfileForSysObjectID(profiles ProfileConfigMap, sysObjectID string) (st
var matchedOids []string
for profile, profConfig := range profiles {
- for _, oidPattern := range profConfig.Definition.SysObjectIds {
+ for _, oidPattern := range profConfig.Definition.SysObjectIDs {
found, err := filepath.Match(oidPattern, sysObjectID)
if err != nil {
log.Debugf("pattern error: %s", err)
diff --git a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go
index f1d68debaf046..d86bd9e40379c 100644
--- a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go
+++ b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go
@@ -74,7 +74,7 @@ func FixtureProfileDefinitionMap() ProfileConfigMap {
Metrics: metrics,
Extends: []string{"_base.yaml", "_generic-if.yaml"},
Device: profiledefinition.DeviceMeta{Vendor: "f5"},
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.3375.2.1.3.4.*"},
StaticTags: []string{"static_tag:from_profile_root", "static_tag:from_base_profile"},
MetricTags: []profiledefinition.MetricTagConfig{
{
@@ -190,7 +190,7 @@ func FixtureProfileDefinitionMap() ProfileConfigMap {
},
"another_profile": ProfileConfig{
Definition: profiledefinition.ProfileDefinition{
- SysObjectIds: profiledefinition.StringArray{"1.3.6.1.4.1.32473.1.1"},
+ SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.32473.1.1"},
Metrics: []profiledefinition.MetricsConfig{
{Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.999.0", Name: "anotherMetric"}, MetricType: ""},
},
diff --git a/pkg/databasemonitoring/aws/aurora_test.go b/pkg/databasemonitoring/aws/aurora_test.go
index 27d5a7e92a04a..208054921686d 100644
--- a/pkg/databasemonitoring/aws/aurora_test.go
+++ b/pkg/databasemonitoring/aws/aurora_test.go
@@ -10,12 +10,13 @@ package aws
import (
"context"
"errors"
+ "testing"
+
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/rds"
"github.com/aws/aws-sdk-go-v2/service/rds/types"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
- "testing"
)
type mockrdsServiceConfigurer func(k *MockrdsService)
@@ -24,14 +25,14 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
testCases := []struct {
name string
configureClient mockrdsServiceConfigurer
- clusterIds []string
+ clusterIDs []string
expectedAuroraClusterEndpoints map[string]*AuroraCluster
expectedErr error
}{
{
name: "no cluster ids given",
configureClient: func(k *MockrdsService) {},
- clusterIds: nil,
+ clusterIDs: nil,
expectedErr: errors.New("at least one database cluster identifier is required"),
},
{
@@ -39,7 +40,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
configureClient: func(k *MockrdsService) {
k.EXPECT().DescribeDBInstances(gomock.Any(), gomock.Any()).Return(&rds.DescribeDBInstancesOutput{}, nil).Times(1)
},
- clusterIds: []string{"test-cluster"},
+ clusterIDs: []string{"test-cluster"},
expectedAuroraClusterEndpoints: nil,
expectedErr: errors.New("no endpoints found for aurora clusters with id(s): test-cluster"),
},
@@ -48,7 +49,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
configureClient: func(k *MockrdsService) {
k.EXPECT().DescribeDBInstances(gomock.Any(), gomock.Any()).Return(nil, errors.New("big time error")).Times(1)
},
- clusterIds: []string{"test-cluster"},
+ clusterIDs: []string{"test-cluster"},
expectedAuroraClusterEndpoints: nil,
expectedErr: errors.New("error running GetAuroraClusterEndpoints big time error"),
},
@@ -71,7 +72,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
},
}, nil).Times(1)
},
- clusterIds: []string{"test-cluster"},
+ clusterIDs: []string{"test-cluster"},
expectedAuroraClusterEndpoints: map[string]*AuroraCluster{
"test-cluster": {
Instances: []*Instance{
@@ -126,7 +127,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
},
}, nil).Times(1)
},
- clusterIds: []string{"test-cluster"},
+ clusterIDs: []string{"test-cluster"},
expectedAuroraClusterEndpoints: map[string]*AuroraCluster{
"test-cluster": {
Instances: []*Instance{
@@ -193,7 +194,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
},
}, nil).Times(1)
},
- clusterIds: []string{"test-cluster"},
+ clusterIDs: []string{"test-cluster"},
expectedAuroraClusterEndpoints: map[string]*AuroraCluster{
"test-cluster": {
Instances: []*Instance{
@@ -226,7 +227,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
},
}, nil).Times(1)
},
- clusterIds: []string{"test-cluster", "test-cluster-2"},
+ clusterIDs: []string{"test-cluster", "test-cluster-2"},
expectedAuroraClusterEndpoints: map[string]*AuroraCluster{
"test-cluster": {
Instances: []*Instance{
@@ -281,7 +282,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
},
}, nil).Times(1)
},
- clusterIds: []string{"test-cluster", "test-cluster-2"},
+ clusterIDs: []string{"test-cluster", "test-cluster-2"},
expectedAuroraClusterEndpoints: map[string]*AuroraCluster{
"test-cluster": {
Instances: []*Instance{
@@ -319,7 +320,7 @@ func TestGetAuroraClusterEndpoints(t *testing.T) {
mockClient := NewMockrdsService(ctrl)
tt.configureClient(mockClient)
client := &Client{client: mockClient}
- clusters, err := client.GetAuroraClusterEndpoints(context.Background(), tt.clusterIds)
+ clusters, err := client.GetAuroraClusterEndpoints(context.Background(), tt.clusterIDs)
if tt.expectedErr != nil {
assert.EqualError(t, err, tt.expectedErr.Error())
return
@@ -598,12 +599,12 @@ func TestGetAuroraClustersFromTags(t *testing.T) {
}
}
-func createDescribeDBInstancesRequest(clusterIds []string) *rds.DescribeDBInstancesInput {
+func createDescribeDBInstancesRequest(clusterIDs []string) *rds.DescribeDBInstancesInput {
return &rds.DescribeDBInstancesInput{
Filters: []types.Filter{
{
Name: aws.String("db-cluster-id"),
- Values: clusterIds,
+ Values: clusterIDs,
},
},
}
diff --git a/pkg/networkdevice/profile/profiledefinition/profile_definition.go b/pkg/networkdevice/profile/profiledefinition/profile_definition.go
index c58d27c1ae9ea..3aa2c2ec1bcb2 100644
--- a/pkg/networkdevice/profile/profiledefinition/profile_definition.go
+++ b/pkg/networkdevice/profile/profiledefinition/profile_definition.go
@@ -20,7 +20,7 @@ type DeviceMeta struct {
type ProfileDefinition struct {
Name string `yaml:"name" json:"name"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
- SysObjectIds StringArray `yaml:"sysobjectid,omitempty" json:"sysobjectid,omitempty"`
+ SysObjectIDs StringArray `yaml:"sysobjectid,omitempty" json:"sysobjectid,omitempty"`
Extends []string `yaml:"extends,omitempty" json:"extends,omitempty"`
Metadata MetadataConfig `yaml:"metadata,omitempty" json:"metadata,omitempty" jsonschema:"-"`
MetricTags []MetricTagConfig `yaml:"metric_tags,omitempty" json:"metric_tags,omitempty"`
diff --git a/pkg/networkdevice/profile/profiledefinition/yaml_utils_test.go b/pkg/networkdevice/profile/profiledefinition/yaml_utils_test.go
index c54fae528ddca..a33102ed28f56 100644
--- a/pkg/networkdevice/profile/profiledefinition/yaml_utils_test.go
+++ b/pkg/networkdevice/profile/profiledefinition/yaml_utils_test.go
@@ -14,7 +14,7 @@ import (
)
type MyStringArray struct {
- SomeIds StringArray `yaml:"my_field"`
+ SomeIDs StringArray `yaml:"my_field"`
}
type MySymbolStruct struct {
@@ -47,7 +47,7 @@ metric_tags:
func TestStringArray_UnmarshalYAML_array(t *testing.T) {
myStruct := MyStringArray{}
- expected := MyStringArray{SomeIds: StringArray{"aaa", "bbb"}}
+ expected := MyStringArray{SomeIDs: StringArray{"aaa", "bbb"}}
yaml.Unmarshal([]byte(`
my_field:
@@ -60,7 +60,7 @@ my_field:
func TestStringArray_UnmarshalYAML_string(t *testing.T) {
myStruct := MyStringArray{}
- expected := MyStringArray{SomeIds: StringArray{"aaa"}}
+ expected := MyStringArray{SomeIDs: StringArray{"aaa"}}
yaml.Unmarshal([]byte(`
my_field: aaa
diff --git a/pkg/security/secl/rules/opts.go b/pkg/security/secl/rules/opts.go
index 8ded8a1331dce..cc4ee53cc284a 100644
--- a/pkg/security/secl/rules/opts.go
+++ b/pkg/security/secl/rules/opts.go
@@ -52,8 +52,8 @@ func (o *Opts) WithEventTypeEnabled(eventTypes map[eval.EventType]bool) *Opts {
}
// WithReservedRuleIDs set reserved rule ids
-func (o *Opts) WithReservedRuleIDs(ruleIds []RuleID) *Opts {
- o.ReservedRuleIDs = ruleIds
+func (o *Opts) WithReservedRuleIDs(ruleIDs []RuleID) *Opts {
+ o.ReservedRuleIDs = ruleIDs
return o
}
diff --git a/release.json b/release.json
index 414621609f64a..696dcf39ad56d 100644
--- a/release.json
+++ b/release.json
@@ -1,17 +1,17 @@
{
- "base_branch": "7.53.x",
- "current_milestone": "7.53.0",
+ "base_branch": "6.53.x",
+ "current_milestone": "6.53.0",
"last_stable": {
"6": "6.52.0",
"7": "7.52.0"
},
"nightly": {
"INTEGRATIONS_CORE_VERSION": "7.53.x",
- "OMNIBUS_SOFTWARE_VERSION": "7.53.x",
- "OMNIBUS_RUBY_VERSION": "7.53.x",
+ "OMNIBUS_SOFTWARE_VERSION": "6.53.x",
+ "OMNIBUS_RUBY_VERSION": "6.53.x",
"JMXFETCH_VERSION": "0.49.0",
"JMXFETCH_HASH": "b5c2c3ff27603f469bb11961d559f1154887963e02b9d70d5f1fc7efa527a486",
- "MACOS_BUILD_VERSION": "7.53.x",
+ "MACOS_BUILD_VERSION": "nschweitzer/master",
"WINDOWS_DDNPM_DRIVER": "release-signed",
"WINDOWS_DDNPM_VERSION": "2.6.0",
"WINDOWS_DDNPM_SHASUM": "b1611ad4ceb8366c88767aeb638abefb226081efbf546b8b886952dd1b18ec05",
@@ -26,11 +26,11 @@
},
"nightly-a7": {
"INTEGRATIONS_CORE_VERSION": "7.53.x",
- "OMNIBUS_SOFTWARE_VERSION": "7.53.x",
- "OMNIBUS_RUBY_VERSION": "7.53.x",
+ "OMNIBUS_SOFTWARE_VERSION": "6.53.x",
+ "OMNIBUS_RUBY_VERSION": "6.53.x",
"JMXFETCH_VERSION": "0.49.0",
"JMXFETCH_HASH": "b5c2c3ff27603f469bb11961d559f1154887963e02b9d70d5f1fc7efa527a486",
- "MACOS_BUILD_VERSION": "7.53.x",
+ "MACOS_BUILD_VERSION": "nschweitzer/master",
"WINDOWS_DDNPM_DRIVER": "release-signed",
"WINDOWS_DDNPM_VERSION": "2.6.0",
"WINDOWS_DDNPM_SHASUM": "b1611ad4ceb8366c88767aeb638abefb226081efbf546b8b886952dd1b18ec05",
@@ -45,8 +45,8 @@
},
"release-a6": {
"INTEGRATIONS_CORE_VERSION": "7.53.0",
- "OMNIBUS_SOFTWARE_VERSION": "7.53.0",
- "OMNIBUS_RUBY_VERSION": "7.53.0",
+ "OMNIBUS_SOFTWARE_VERSION": "6.53.0",
+ "OMNIBUS_RUBY_VERSION": "6.53.0",
"JMXFETCH_VERSION": "0.49.0",
"JMXFETCH_HASH": "b5c2c3ff27603f469bb11961d559f1154887963e02b9d70d5f1fc7efa527a486",
"SECURITY_AGENT_POLICIES_VERSION": "v0.53.1",
@@ -60,8 +60,8 @@
},
"release-a7": {
"INTEGRATIONS_CORE_VERSION": "7.53.0",
- "OMNIBUS_SOFTWARE_VERSION": "7.53.0",
- "OMNIBUS_RUBY_VERSION": "7.53.0",
+ "OMNIBUS_SOFTWARE_VERSION": "6.53.0",
+ "OMNIBUS_RUBY_VERSION": "6.53.0",
"JMXFETCH_VERSION": "0.49.0",
"JMXFETCH_HASH": "b5c2c3ff27603f469bb11961d559f1154887963e02b9d70d5f1fc7efa527a486",
"SECURITY_AGENT_POLICIES_VERSION": "v0.53.1",
@@ -103,4 +103,4 @@
"dca-1.9.0": {
"SECURITY_AGENT_POLICIES_VERSION": "v0.3"
}
-}
\ No newline at end of file
+}
diff --git a/tasks/__init__.py b/tasks/__init__.py
index 052de1f32aad9..44f761a05ffe9 100644
--- a/tasks/__init__.py
+++ b/tasks/__init__.py
@@ -69,7 +69,7 @@
from tasks.install_tasks import download_tools, install_shellcheck, install_tools
from tasks.junit_tasks import junit_upload
from tasks.libs.go_workspaces import handle_go_work
-from tasks.linter_tasks import lint_copyrights, lint_filenames, lint_go, lint_python
+from tasks.linter_tasks import gitlab_ci, lint_copyrights, lint_filenames, lint_go, lint_python
from tasks.pr_checks import lint_releasenote
from tasks.show_linters_issues import show_linters_issues
from tasks.unit_tests import invoke_unit_tests
@@ -97,6 +97,7 @@
ns.add_task(lint_filenames)
ns.add_task(lint_python)
ns.add_task(lint_go)
+ns.add_task(gitlab_ci, "lint-gitlab")
ns.add_task(show_linters_issues)
ns.add_task(go_version)
ns.add_task(update_go)
diff --git a/tasks/libs/common/gitlab.py b/tasks/libs/common/gitlab.py
index ba6b8c81ab6c4..533ab9b31c2ad 100644
--- a/tasks/libs/common/gitlab.py
+++ b/tasks/libs/common/gitlab.py
@@ -2,8 +2,10 @@
import os
import platform
import subprocess
+from collections import UserList
from urllib.parse import quote
+import yaml
from invoke.exceptions import Exit
from tasks.libs.common.remote_api import APIError, RemoteAPI
@@ -288,6 +290,15 @@ def find_tag(self, tag_name):
else:
raise e
+ def lint(self, configuration):
+ """
+ Lint a gitlab-ci configuration.
+ """
+ path = f"/projects/{quote(self.project_name, safe='')}/ci/lint?dry_run=true&include_jobs=true"
+ headers = {"Content-Type": "application/json"}
+ data = {"content": configuration}
+ return self.make_request(path, headers=headers, data=data, json_input=True, json_output=True)
+
def make_request(
self, path, headers=None, data=None, json_input=False, json_output=False, stream_output=False, method=None
):
@@ -353,3 +364,175 @@ def get_gitlab_bot_token():
)
raise Exit(code=1)
return os.environ["GITLAB_BOT_TOKEN"]
+
+
+class ReferenceTag(yaml.YAMLObject):
+ """
+ Custom yaml tag to handle references in gitlab-ci configuration
+ """
+
+ yaml_tag = u'!reference'
+
+ def __init__(self, references):
+ self.references = references
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ return UserList(loader.construct_sequence(node))
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ return dumper.represent_sequence(cls.yaml_tag, data.data, flow_style=True)
+
+
+def generate_gitlab_full_configuration(input_file, context=None, compare_to=None):
+ """
+ Generate a full gitlab-ci configuration by resolving all includes
+ """
+ # Update loader/dumper to handle !reference tag
+ yaml.SafeLoader.add_constructor(ReferenceTag.yaml_tag, ReferenceTag.from_yaml)
+ yaml.SafeDumper.add_representer(UserList, ReferenceTag.to_yaml)
+
+ yaml_contents = []
+ read_includes(input_file, yaml_contents)
+ full_configuration = {}
+ for yaml_file in yaml_contents:
+ full_configuration.update(yaml_file)
+ # Override some variables with a dedicated context
+ if context:
+ full_configuration["variables"].update(context)
+ if compare_to:
+ for value in full_configuration.values():
+ if (
+ isinstance(value, dict)
+ and "changes" in value
+ and isinstance(value["changes"], dict)
+ and "compare_to" in value["changes"]
+ ):
+ value["changes"]["compare_to"] = compare_to
+ elif isinstance(value, list):
+ for v in value:
+ if (
+ isinstance(v, dict)
+ and "changes" in v
+ and isinstance(v["changes"], dict)
+ and "compare_to" in v["changes"]
+ ):
+ v["changes"]["compare_to"] = compare_to
+ return yaml.safe_dump(full_configuration)
+
+
+def read_includes(yaml_file, includes):
+ """
+ Recursive method to read all includes from yaml files and store them in a list
+ """
+ current_file = read_content(yaml_file)
+ if 'include' not in current_file:
+ includes.append(current_file)
+ else:
+ for include in current_file['include']:
+ read_includes(include, includes)
+ del current_file['include']
+ includes.append(current_file)
+
+
+def read_content(file_path):
+ """
+ Read the content of a file, either from a local file or from an http endpoint
+ """
+ content = None
+ if file_path.startswith('http'):
+ import requests
+
+ response = requests.get(file_path)
+ response.raise_for_status()
+ content = response.text
+ else:
+ with open(file_path) as f:
+ content = f.read()
+ return yaml.safe_load(content)
+
+
+def get_preset_contexts(required_tests):
+ possible_tests = ["all", "main", "release", "mq"]
+ required_tests = required_tests.casefold().split(",")
+ if set(required_tests) | set(possible_tests) != set(possible_tests):
+ raise Exit(f"Invalid test required: {required_tests} must contain only values from {possible_tests}", 1)
+ main_contexts = [
+ ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"]
+ ("CI_COMMIT_BRANCH", ["main"]), # ["main", "mq-working-branch-main", "7.42.x", "any/name"]
+ ("CI_COMMIT_TAG", [""]), # ["", "1.2.3-rc.4", "6.6.6"]
+ ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"]
+ ("DEPLOY_AGENT", ["true"]),
+ ("RUN_ALL_BUILDS", ["true"]),
+ ("RUN_E2E_TESTS", ["auto"]),
+ ("RUN_KMT_TESTS", ["on"]),
+ ("RUN_UNIT_TESTS", ["on"]),
+ ("TESTING_CLEANUP", ["true"]),
+ ]
+ release_contexts = [
+ ("BUCKET_BRANCH", ["stable"]),
+ ("CI_COMMIT_BRANCH", ["7.42.x"]),
+ ("CI_COMMIT_TAG", ["3.2.1", "1.2.3-rc.4"]),
+ ("CI_PIPELINE_SOURCE", ["schedule"]),
+ ("DEPLOY_AGENT", ["true"]),
+ ("RUN_ALL_BUILDS", ["true"]),
+ ("RUN_E2E_TESTS", ["auto"]),
+ ("RUN_KMT_TESTS", ["on"]),
+ ("RUN_UNIT_TESTS", ["on"]),
+ ("TESTING_CLEANUP", ["true"]),
+ ]
+ mq_contexts = [
+ ("BUCKET_BRANCH", ["dev"]),
+ ("CI_COMMIT_BRANCH", ["mq-working-branch-main"]),
+ ("CI_PIPELINE_SOURCE", ["pipeline"]),
+ ("DEPLOY_AGENT", ["false"]),
+ ("RUN_ALL_BUILDS", ["false"]),
+ ("RUN_E2E_TESTS", ["auto"]),
+ ("RUN_KMT_TESTS", ["off"]),
+ ("RUN_UNIT_TESTS", ["off"]),
+ ("TESTING_CLEANUP", ["false"]),
+ ]
+ all_contexts = []
+ for test in required_tests:
+ if test in ["all", "main"]:
+ generate_contexts(main_contexts, [], all_contexts)
+ if test in ["all", "release"]:
+ generate_contexts(release_contexts, [], all_contexts)
+ if test in ["all", "mq"]:
+ generate_contexts(mq_contexts, [], all_contexts)
+ return all_contexts
+
+
+def generate_contexts(contexts, context, all_contexts):
+ """
+ Recursive method to generate all possible contexts from a list of tuples
+ """
+ if len(contexts) == 0:
+ all_contexts.append(context[:])
+ return
+ for value in contexts[0][1]:
+ context.append((contexts[0][0], value))
+ generate_contexts(contexts[1:], context, all_contexts)
+ context.pop()
+
+
+def load_context(context):
+ """
+ Load a context either from a yaml file or from a json string
+ """
+ if os.path.exists(context):
+ with open(context) as f:
+ y = yaml.safe_load(f)
+ if "variables" not in y:
+ raise Exit(
+ f"Invalid context file: {context}, missing 'variables' key. Input file must be similar to tasks/unit-tests/testdata/gitlab_main_context_template.yml",
+ 1,
+ )
+ return [[(k, v) for k, v in y["variables"].items()]]
+ else:
+ try:
+ j = json.loads(context)
+ return [[(k, v) for k, v in j.items()]]
+ except json.JSONDecodeError:
+ raise Exit(f"Invalid context: {context}, must be a valid json, or a path to a yaml file", 1)
diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py
index 2ecb55b36d502..c757ee2a412eb 100644
--- a/tasks/libs/common/utils.py
+++ b/tasks/libs/common/utils.py
@@ -474,10 +474,7 @@ def get_version(
pipeline_id=None,
include_git=False,
include_pre=True,
- fake_condition=True,
):
- if fake_condition:
- return "7.53.0"
version = ""
if pipeline_id is None:
pipeline_id = os.getenv("CI_PIPELINE_ID")
diff --git a/tasks/linter_tasks.py b/tasks/linter_tasks.py
index bee3afda4c2d6..7873b593a02c4 100644
--- a/tasks/linter_tasks.py
+++ b/tasks/linter_tasks.py
@@ -1,3 +1,4 @@
+import re
import sys
from collections import defaultdict
from typing import List
@@ -8,6 +9,13 @@
from tasks.flavor import AgentFlavor
from tasks.go import run_golangci_lint
from tasks.libs.common.check_tools_version import check_tools_version
+from tasks.libs.common.gitlab import (
+ Gitlab,
+ generate_gitlab_full_configuration,
+ get_gitlab_token,
+ get_preset_contexts,
+ load_context,
+)
from tasks.libs.common.utils import DEFAULT_BRANCH, color_message
from tasks.libs.copyright import CopyrightLinter
from tasks.modules import GoModule
@@ -237,3 +245,107 @@ def command(module_results, module: GoModule, module_result):
module_results.append(module_result)
return test_core(modules, flavor, ModuleLintResult, "golangci_lint", command, headless_mode=headless_mode)
+
+
+@task
+def list_ssm_parameters(_):
+ """
+ List all SSM parameters used in the datadog-agent repository.
+ """
+
+ ssm_owner = re.compile(r"^[A-Z].*_SSM_(NAME|KEY): (?P[^ ]+) +# +(?P.+)$")
+ ssm_params = defaultdict(list)
+ with open(".gitlab-ci.yml") as f:
+ for line in f:
+ m = ssm_owner.match(line.strip())
+ if m:
+ ssm_params[m.group("owner")].append(m.group("param"))
+ for owner in ssm_params.keys():
+ print(f"Owner:{owner}")
+ for param in ssm_params[owner]:
+ print(f" - {param}")
+
+
+@task
+def ssm_parameters(ctx):
+ """
+ Lint SSM parameters in the datadog-agent repository.
+ """
+ lint_folders = [".circleci", ".github", ".gitlab", "tasks", "test"]
+ repo_files = ctx.run("git ls-files", hide="both")
+ error_files = []
+ for file in repo_files.stdout.split("\n"):
+ if any(file.startswith(f) for f in lint_folders):
+ matched = is_get_parameter_call(file)
+ if matched:
+ error_files.append(matched)
+ if error_files:
+ print("The following files contain unexpected syntax for aws ssm get-parameter:")
+ for file in error_files:
+ print(f" - {file}")
+ raise Exit(code=1)
+
+
+class SSMParameterCall:
+ def __init__(self, file, line_nb, with_wrapper=False, with_env_var=False):
+ self.file = file
+ self.line_nb = line_nb
+ self.with_wrapper = with_wrapper
+ self.with_env_var = with_env_var
+
+ def __str__(self):
+ message = ""
+ if not self.with_wrapper:
+ message += "Please use the dedicated `aws_ssm_get_wrapper.(sh|ps1)`."
+ if not self.with_env_var:
+ message += " Save your parameter name as environment variable in .gitlab-ci.yml file."
+ return f"{self.file}:{self.line_nb+1}. {message}"
+
+ def __repr__(self):
+ return str(self)
+
+
+def is_get_parameter_call(file):
+ ssm_get = re.compile(r"^.+ssm.get.+$")
+ aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P[^ ]+).*$")
+ ssm_wrapper_call = re.compile(r"^.+aws_ssm_get_wrapper.(sh|ps1) +(?P[^ )]+).*$")
+ with open(file) as f:
+ try:
+ for nb, line in enumerate(f):
+ is_ssm_get = ssm_get.match(line.strip())
+ if is_ssm_get:
+ m = aws_ssm_call.match(line.strip())
+ if m:
+ return SSMParameterCall(
+ file, nb, with_wrapper=False, with_env_var=m.group("param").startswith("$")
+ )
+ m = ssm_wrapper_call.match(line.strip())
+ if m and not m.group("param").startswith("$"):
+ return SSMParameterCall(file, nb, with_wrapper=True, with_env_var=False)
+ except UnicodeDecodeError:
+ pass
+
+
+@task
+def gitlab_ci(_, test="all", custom_context=None):
+ """
+ Lint Gitlab CI files in the datadog-agent repository.
+ """
+ all_contexts = []
+ if custom_context:
+ all_contexts = load_context(custom_context)
+ else:
+ all_contexts = get_preset_contexts(test)
+ print(f"We will tests {len(all_contexts)} contexts.")
+ for context in all_contexts:
+ print("Test gitlab configuration with context: ", context)
+ config = generate_gitlab_full_configuration(".gitlab-ci.yml", dict(context))
+ gitlab = Gitlab(api_token=get_gitlab_token())
+ res = gitlab.lint(config)
+ status = color_message("valid", "green") if res["valid"] else color_message("invalid", "red")
+ print(f"Config is {status}")
+ if len(res["warnings"]) > 0:
+ print(color_message(f"Warnings: {res['warnings']}", "orange"), file=sys.stderr)
+ if not res["valid"]:
+ print(color_message(f"Errors: {res['errors']}", "red"), file=sys.stderr)
+ raise Exit(code=1)
diff --git a/tasks/notify.py b/tasks/notify.py
index 4ffedf155e575..7d582ff51f553 100644
--- a/tasks/notify.py
+++ b/tasks/notify.py
@@ -14,7 +14,6 @@
from tasks.libs.datadog_api import create_count, send_metrics
from tasks.libs.pipeline_data import get_failed_jobs
from tasks.libs.pipeline_notifications import (
- GITHUB_SLACK_MAP,
base_message,
check_for_missing_owners_slack_and_jira,
find_job_owners,
@@ -93,7 +92,7 @@ def send_message(_, notification_type="merge", print_to_stdout=False):
# Send messages
for owner, message in messages_to_send.items():
- channel = GITHUB_SLACK_MAP.get(owner.lower(), None)
+ channel = "#agent-agent6-ops"
message.base_message = base
if channel is None:
channel = "#datadog-agent-pipelines"
@@ -304,7 +303,7 @@ def send_notification(alert_jobs):
jobs = ", ".join(f"`{j}`" for j in alert_jobs["cumulative"])
message += f"Job(s) {jobs} failed {CUMULATIVE_THRESHOLD} times in last {CUMULATIVE_LENGTH} executions.\n"
if message:
- send_slack_message("#agent-platform-ops", message)
+ send_slack_message("#agent-agent6-ops", message)
@task
diff --git a/tasks/release.py b/tasks/release.py
index b226a8bfc1747..628da4bc0bc88 100644
--- a/tasks/release.py
+++ b/tasks/release.py
@@ -1575,9 +1575,9 @@ def cleanup(ctx):
@task
def check_omnibus_branches(ctx):
base_branch = _get_release_json_value('base_branch')
- if base_branch == 'main':
- omnibus_ruby_branch = 'datadog-5.5.0'
- omnibus_software_branch = 'master'
+ if base_branch == '6.53.x':
+ omnibus_ruby_branch = '6.53.x'
+ omnibus_software_branch = '6.53.x'
else:
omnibus_ruby_branch = base_branch
omnibus_software_branch = base_branch
diff --git a/tasks/unit-tests/notify_tests.py b/tasks/unit-tests/notify_tests.py
index a5ef7f801fb3c..cfe9198af08cb 100644
--- a/tasks/unit-tests/notify_tests.py
+++ b/tasks/unit-tests/notify_tests.py
@@ -134,7 +134,7 @@ def test_consecutive(self, mock_slack):
alert_jobs = {"consecutive": ["foo"], "cumulative": []}
notify.send_notification(alert_jobs)
mock_slack.assert_called_with(
- "#agent-platform-ops", f"Job(s) `foo` failed {notify.CONSECUTIVE_THRESHOLD} times in a row.\n"
+ "#agent-agent6-ops", f"Job(s) `foo` failed {notify.CONSECUTIVE_THRESHOLD} times in a row.\n"
)
@patch('tasks.notify.send_slack_message')
@@ -142,7 +142,7 @@ def test_cumulative(self, mock_slack):
alert_jobs = {"consecutive": [], "cumulative": ["bar", "baz"]}
notify.send_notification(alert_jobs)
mock_slack.assert_called_with(
- "#agent-platform-ops",
+ "#agent-agent6-ops",
f"Job(s) `bar`, `baz` failed {notify.CUMULATIVE_THRESHOLD} times in last {notify.CUMULATIVE_LENGTH} executions.\n",
)
@@ -151,7 +151,7 @@ def test_both(self, mock_slack):
alert_jobs = {"consecutive": ["foo"], "cumulative": ["bar", "baz"]}
notify.send_notification(alert_jobs)
mock_slack.assert_called_with(
- "#agent-platform-ops",
+ "#agent-agent6-ops",
f"Job(s) `foo` failed {notify.CONSECUTIVE_THRESHOLD} times in a row.\nJob(s) `bar`, `baz` failed {notify.CUMULATIVE_THRESHOLD} times in last {notify.CUMULATIVE_LENGTH} executions.\n",
)
diff --git a/tasks/unit-tests/testdata/codeowners.txt b/tasks/unit-tests/testdata/codeowners.txt
index ff398c0a09003..27879add4ff60 100644
--- a/tasks/unit-tests/testdata/codeowners.txt
+++ b/tasks/unit-tests/testdata/codeowners.txt
@@ -1,4 +1,4 @@
/.* @DataDog/agent-platform
/*.md @DataDog/agent-platform @DataDog/documentation
-/.gitlab/ @DataDog/agent-platform
-/.gitlab/security.yml @DataDog/agent-security
+.gitlab/ @DataDog/agent-platform
+.gitlab/security.yml @DataDog/agent-security
diff --git a/tasks/unit-tests/testdata/fake_gitlab-ci.yml b/tasks/unit-tests/testdata/fake_gitlab-ci.yml
index 27967ef7c523e..34eb1dc90366f 100644
--- a/tasks/unit-tests/testdata/fake_gitlab-ci.yml
+++ b/tasks/unit-tests/testdata/fake_gitlab-ci.yml
@@ -1,50 +1,50 @@
---
include:
- - /.gitlab/setup.yml
- - /.gitlab/shared.yml
- - /.gitlab/maintenance_jobs.yml
- - /.gitlab/deps_build.yml
- - /.gitlab/package_deps_build.yml
- - /.gitlab/deps_fetch.yml
- - /.gitlab/source_test.yml
- - /.gitlab/source_test_junit_upload.yml
- - /.gitlab/binary_build.yml
- - /.gitlab/cancel-prev-pipelines.yml
- - /.gitlab/do_not_merge.yml
- - /.gitlab/integration_test.yml
- - /.gitlab/package_build.yml
- - /.gitlab/kitchen_deploy.yml
- - /.gitlab/kitchen_testing.yml
- - /.gitlab/kitchen_tests_upload.yml
- - /.gitlab/new-e2e_testing.yml
- - /.gitlab/new-e2e_common/testing.yml
- - /.gitlab/install_script_testing.yml
- - /.gitlab/pkg_metrics.yml
- - /.gitlab/container_build.yml
- - /.gitlab/container_scan.yml
- - /.gitlab/check_deploy.yml
- - /.gitlab/dev_container_deploy.yml
- - /.gitlab/deploy_common.yml
- - /.gitlab/deploy_containers.yml
- - /.gitlab/deploy_packages.yml
- - /.gitlab/deploy_dca.yml
- - /.gitlab/choco_build.yml
- - /.gitlab/choco_deploy.yml
- - /.gitlab/internal_image_deploy.yml
- - /.gitlab/trigger_release.yml
- - /.gitlab/e2e.yml
- - /.gitlab/e2e_test_junit_upload.yml
- - /.gitlab/fakeintake.yml
- - /.gitlab/kitchen_cleanup.yml
- - /.gitlab/functional_test.yml
- - /.gitlab/functional_test_cleanup.yml
- - /.gitlab/functional_test_junit_upload.yml
- - /.gitlab/internal_kubernetes_deploy.yml
- - /.gitlab/notify.yml
- - /.gitlab/kitchen_common/cleanup.yml
- - /.gitlab/kitchen_common/testing.yml
- - /.gitlab/benchmarks/benchmarks.yml
- - /.gitlab/benchmarks/macrobenchmarks.yml
+ - .gitlab/setup.yml
+ - .gitlab/shared.yml
+ - .gitlab/maintenance_jobs.yml
+ - .gitlab/deps_build.yml
+ - .gitlab/package_deps_build.yml
+ - .gitlab/deps_fetch.yml
+ - .gitlab/source_test.yml
+ - .gitlab/source_test_junit_upload.yml
+ - .gitlab/binary_build.yml
+ - .gitlab/cancel-prev-pipelines.yml
+ - .gitlab/do_not_merge.yml
+ - .gitlab/integration_test.yml
+ - .gitlab/package_build.yml
+ - .gitlab/kitchen_deploy.yml
+ - .gitlab/kitchen_testing.yml
+ - .gitlab/kitchen_tests_upload.yml
+ - .gitlab/new-e2e_testing.yml
+ - .gitlab/new-e2e_common/testing.yml
+ - .gitlab/install_script_testing.yml
+ - .gitlab/pkg_metrics.yml
+ - .gitlab/container_build.yml
+ - .gitlab/container_scan.yml
+ - .gitlab/check_deploy.yml
+ - .gitlab/dev_container_deploy.yml
+ - .gitlab/deploy_common.yml
+ - .gitlab/deploy_containers.yml
+ - .gitlab/deploy_packages.yml
+ - .gitlab/deploy_dca.yml
+ - .gitlab/choco_build.yml
+ - .gitlab/choco_deploy.yml
+ - .gitlab/internal_image_deploy.yml
+ - .gitlab/trigger_release.yml
+ - .gitlab/e2e.yml
+ - .gitlab/e2e_test_junit_upload.yml
+ - .gitlab/fakeintake.yml
+ - .gitlab/kitchen_cleanup.yml
+ - .gitlab/functional_test.yml
+ - .gitlab/functional_test_cleanup.yml
+ - .gitlab/functional_test_junit_upload.yml
+ - .gitlab/internal_kubernetes_deploy.yml
+ - .gitlab/notify.yml
+ - .gitlab/kitchen_common/cleanup.yml
+ - .gitlab/kitchen_common/testing.yml
+ - .gitlab/benchmarks/benchmarks.yml
+ - .gitlab/benchmarks/macrobenchmarks.yml
default:
retry:
@@ -396,18 +396,18 @@ workflow:
- <<: *if_deploy
when: on_failure
-.on_deploy_a6:
+.on_deploy:
- <<: *if_not_version_6
when: never
- <<: *if_deploy
-.on_deploy_a6_failure:
+.on_deploy_failure:
- <<: *if_not_version_6
when: never
- <<: *if_deploy
when: on_failure
-.on_deploy_a6_rc:
+.on_deploy_rc:
- <<: *if_not_version_6
when: never
- <<: *if_not_deploy
@@ -419,7 +419,7 @@ workflow:
DSD_REPOSITORY: dogstatsd
IMG_REGISTRIES: public
-.on_deploy_a6_manual:
+.on_deploy_manual:
- <<: *if_not_version_6
when: never
- <<: *if_not_deploy
@@ -436,9 +436,9 @@ workflow:
AGENT_REPOSITORY: agent
IMG_REGISTRIES: public
-# Same as on_deploy_a6_manual, except the job would not run on pipelines
+# Same as on_deploy_manual, except the job would not run on pipelines
# using beta branch, it would only run for the final release.
-.on_deploy_a6_manual_final:
+.on_deploy_manual_final:
- <<: *if_not_version_6
when: never
- <<: *if_not_deploy
@@ -457,12 +457,12 @@ workflow:
AGENT_REPOSITORY: agent
IMG_REGISTRIES: public
-# This rule is a variation of on_deploy_a6_manual where
+# This rule is a variation of on_deploy_manual where
# the job is usually run manually, except when the pipeline
# builds an RC: in this case, the job is run automatically.
# This is done to reduce the number of manual steps that have
# to be done when creating RCs.
-.on_deploy_a6_manual_auto_on_rc:
+.on_deploy_manual_auto_on_rc:
- <<: *if_not_version_6
when: never
- <<: *if_not_deploy
@@ -630,7 +630,7 @@ workflow:
DSD_REPOSITORY: ci/datadog-agent/dogstatsd-release
IMG_REGISTRIES: internal-aws-ddbuild
-.on_deploy_nightly_repo_branch_a6:
+.on_deploy_nightly_repo_branch:
- <<: *if_not_version_6
when: never
- <<: *if_not_nightly_repo_branch
diff --git a/tasks/unit-tests/testdata/gitlab_main_context_template.yml b/tasks/unit-tests/testdata/gitlab_main_context_template.yml
new file mode 100644
index 0000000000000..eeac1dbd391bd
--- /dev/null
+++ b/tasks/unit-tests/testdata/gitlab_main_context_template.yml
@@ -0,0 +1,11 @@
+---
+variables:
+ BUCKET_BRANCH: 'dev'
+ CI_COMMIT_BRANCH: 'main'
+ CI_COMMIT_TAG: ''
+ CI_PIPELINE_SOURCE: 'pipeline'
+ DEPLOY_AGENT: 'true'
+ RUN_ALL_BUILDS: 'true'
+ RUN_E2E_TESTS: 'on'
+ RUN_KMT_TESTS: 'on'
+ RUN_UNIT_TESTS: 'true'
diff --git a/tasks/unit-tests/testdata/gitlab_mq_context_template.yml b/tasks/unit-tests/testdata/gitlab_mq_context_template.yml
new file mode 100644
index 0000000000000..29c05d73fcfb9
--- /dev/null
+++ b/tasks/unit-tests/testdata/gitlab_mq_context_template.yml
@@ -0,0 +1,10 @@
+---
+variables:
+ BUCKET_BRANCH: 'dev'
+ CI_COMMIT_BRANCH: 'mq-working-branch-main'
+ CI_PIPELINE_SOURCE: 'pipeline'
+ DEPLOY_AGENT: 'false'
+ RUN_ALL_BUILDS: 'false'
+ RUN_E2E_TESTS: 'auto'
+ RUN_KMT_TESTS: 'off'
+ RUN_UNIT_TESTS: 'off'
diff --git a/test/new-e2e/examples/kind_test.go b/test/new-e2e/examples/kind_test.go
index 5f2a5d87520c8..1845361001b6d 100644
--- a/test/new-e2e/examples/kind_test.go
+++ b/test/new-e2e/examples/kind_test.go
@@ -10,6 +10,8 @@ import (
"strings"
"testing"
+ corev1 "k8s.io/api/core/v1"
+
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes"
@@ -20,6 +22,7 @@ import (
compkube "github.com/DataDog/test-infra-definitions/components/kubernetes"
"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -29,12 +32,12 @@ type myKindSuite struct {
func TestMyKindSuite(t *testing.T) {
e2e.Run(t, &myKindSuite{}, e2e.WithProvisioner(
- awskubernetes.Provisioner(
+ awskubernetes.KindProvisioner(
awskubernetes.WithoutFakeIntake(),
- awskubernetes.WithWorkloadApp(func(e config.CommonEnvironment, kubeProvider *kubernetes.Provider) (*compkube.Workload, error) {
- return nginx.K8sAppDefinition(e, kubeProvider, "nginx", nil)
+ awskubernetes.WithWorkloadApp(func(e config.Env, kubeProvider *kubernetes.Provider) (*compkube.Workload, error) {
+ return nginx.K8sAppDefinition(e, kubeProvider, "nginx", "", false, nil)
}),
- awskubernetes.WithWorkloadApp(func(e config.CommonEnvironment, kubeProvider *kubernetes.Provider) (*compkube.Workload, error) {
+ awskubernetes.WithWorkloadApp(func(e config.Env, kubeProvider *kubernetes.Provider) (*compkube.Workload, error) {
return dogstatsd.K8sAppDefinition(e, kubeProvider, "dogstatsd", 8125, "/var/run/datadog/dsd.socket")
}),
)))
@@ -42,6 +45,7 @@ func TestMyKindSuite(t *testing.T) {
func (v *myKindSuite) TestClusterAgentInstalled() {
res, _ := v.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.TODO(), v1.ListOptions{})
+ var clusterAgent corev1.Pod
containsClusterAgent := false
for _, pod := range res.Items {
if strings.Contains(pod.Name, "cluster-agent") {
@@ -50,5 +54,9 @@ func (v *myKindSuite) TestClusterAgentInstalled() {
}
}
assert.True(v.T(), containsClusterAgent, "Cluster Agent not found")
- assert.Equal(v.T(), v.Env().Agent.InstallNameLinux, "dda")
+ stdout, stderr, err := v.Env().KubernetesCluster.KubernetesClient.
+ PodExec("datadog", clusterAgent.Name, "datadog-cluster-agent", []string{"ls"})
+ require.NoError(v.T(), err)
+ assert.Empty(v.T(), stderr)
+ assert.NotEmpty(v.T(), stdout)
}
diff --git a/test/new-e2e/examples/vmenv_withami_test.go b/test/new-e2e/examples/vmenv_withami_test.go
index e4fba9c4bc6ba..1bf202cd16d86 100644
--- a/test/new-e2e/examples/vmenv_withami_test.go
+++ b/test/new-e2e/examples/vmenv_withami_test.go
@@ -32,7 +32,7 @@ func TestVMSuiteEx2(t *testing.T) {
}
func (v *vmSuiteEx2) TestAmiMatch() {
- ec2Metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ ec2Metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
amiID := ec2Metadata.Get("ami-id")
assert.Equal(v.T(), amiID, "ami-05fab674de2157a80")
}
diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod
index f64ecbc4fd9b6..328aec214caea 100644
--- a/test/new-e2e/go.mod
+++ b/test/new-e2e/go.mod
@@ -1,6 +1,8 @@
module github.com/DataDog/datadog-agent/test/new-e2e
-go 1.21.9
+go 1.22.5
+
+toolchain go1.22.8
// Do not upgrade Pulumi plugins to versions different from `test-infra-definitions`.
// The plugin versions NEED to be aligned.
@@ -8,7 +10,9 @@ go 1.21.9
replace (
github.com/DataDog/datadog-agent/pkg/proto => ../../pkg/proto
+ github.com/DataDog/datadog-agent/pkg/util/optional => ../../pkg/util/optional
github.com/DataDog/datadog-agent/pkg/util/pointer => ../../pkg/util/pointer
+ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../pkg/util/scrubber
github.com/DataDog/datadog-agent/pkg/util/testutil => ../../pkg/util/testutil
github.com/DataDog/datadog-agent/pkg/version => ../../pkg/version
github.com/DataDog/datadog-agent/test/fakeintake => ../fakeintake
@@ -27,9 +31,9 @@ require (
// `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version
// Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB
// => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB
- github.com/DataDog/test-infra-definitions v0.0.0-20240322160927-3eac4b5bb0c4
- github.com/aws/aws-sdk-go-v2 v1.25.2
- github.com/aws/aws-sdk-go-v2/config v1.27.6
+ github.com/DataDog/test-infra-definitions v0.0.0-20241104134504-0a48ed729822
+ github.com/aws/aws-sdk-go-v2 v1.32.2
+ github.com/aws/aws-sdk-go-v2/config v1.27.40
github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1
github.com/aws/aws-sdk-go-v2/service/eks v1.35.1
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.1
@@ -40,13 +44,13 @@ require (
github.com/google/uuid v1.6.0
github.com/kr/pretty v0.3.1
github.com/pkg/sftp v1.13.6
- github.com/pulumi/pulumi/sdk/v3 v3.108.1
- github.com/samber/lo v1.39.0
+ github.com/pulumi/pulumi/sdk/v3 v3.137.0
+ github.com/samber/lo v1.47.0
github.com/sethvargo/go-retry v0.2.4
github.com/stretchr/testify v1.9.0
- golang.org/x/crypto v0.21.0
- golang.org/x/sys v0.18.0
- golang.org/x/term v0.18.0
+ golang.org/x/crypto v0.25.0
+ golang.org/x/sys v0.22.0
+ golang.org/x/term v0.22.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/zorkian/go-datadog-api.v2 v2.30.0
k8s.io/api v0.28.4
@@ -57,7 +61,7 @@ require (
)
require (
- dario.cat/mergo v1.0.0 // indirect
+ dario.cat/mergo v1.0.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/DataDog/datadog-agent/pkg/proto v0.53.0
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a // indirect
@@ -71,26 +75,26 @@ require (
github.com/agext/levenshtein v1.2.3 // indirect
github.com/alessio/shellescape v1.4.2 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.6 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/ecs v1.41.1
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/s3 v1.51.3
- github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.28.3 // indirect
- github.com/aws/smithy-go v1.20.1 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.38 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0
+ github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 // indirect
+ github.com/aws/smithy-go v1.22.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/charmbracelet/bubbles v0.18.0 // indirect
github.com/charmbracelet/bubbletea v0.25.0 // indirect
@@ -113,14 +117,14 @@ require (
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
- github.com/go-git/go-git/v5 v5.11.0 // indirect
- github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-git/go-git/v5 v5.12.0 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/glog v1.2.0 // indirect
+ github.com/golang/glog v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.0.1 // indirect
@@ -132,7 +136,7 @@ require (
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/hcl/v2 v2.20.0 // indirect
+ github.com/hashicorp/hcl/v2 v2.20.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
@@ -175,50 +179,49 @@ require (
github.com/pkg/term v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 // indirect
- github.com/pulumi/esc v0.8.2 // indirect
- github.com/pulumi/pulumi-command/sdk v0.9.2 // indirect
- github.com/pulumi/pulumi-libvirt/sdk v0.4.4 // indirect
+ github.com/pulumi/esc v0.10.0 // indirect
+ github.com/pulumi/pulumi-command/sdk v1.0.1 // indirect
+ github.com/pulumi/pulumi-libvirt/sdk v0.4.7 // indirect
// pulumi-random v4.14.0 uses GO 1.21:
// https://github.com/pulumi/pulumi-random/blob/v4.14.0/sdk/go.mod#L3
// So, do not upgrade pulumi-random to v4.14.0 or above before migration to GO 1.21.
- github.com/pulumi/pulumi-random/sdk/v4 v4.16.0 // indirect
+ github.com/pulumi/pulumi-random/sdk/v4 v4.16.6 // indirect
github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 // indirect
- github.com/pulumiverse/pulumi-time/sdk v0.0.0-20231010123146-089d7304da13 // indirect
+ github.com/pulumiverse/pulumi-time/sdk v0.1.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
- github.com/sergi/go-diff v1.3.1 // indirect
+ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
- github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/skeema/knownhosts v1.2.2 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/texttheater/golang-levenshtein v1.0.1 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
- github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect
github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- github.com/zclconf/go-cty v1.14.3 // indirect
- github.com/zorkian/go-datadog-api v2.30.0+incompatible
+ github.com/zclconf/go-cty v1.14.4 // indirect
+ github.com/zorkian/go-datadog-api v2.30.0+incompatible // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
- golang.org/x/mod v0.16.0 // indirect
- golang.org/x/net v0.22.0 // indirect
- golang.org/x/oauth2 v0.16.0 // indirect
- golang.org/x/sync v0.6.0 // indirect
- golang.org/x/text v0.14.0
- golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.19.0 // indirect
+ golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/net v0.27.0 // indirect
+ golang.org/x/oauth2 v0.18.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/text v0.16.0
+ golang.org/x/time v0.5.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 // indirect
- google.golang.org/grpc v1.62.1 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
+ google.golang.org/grpc v1.63.2 // indirect
+ google.golang.org/protobuf v1.34.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
@@ -237,13 +240,24 @@ require (
)
require (
- github.com/pulumi/pulumi-aws/sdk/v6 v6.25.0
- github.com/pulumi/pulumi-awsx/sdk/v2 v2.5.0
- github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.9.0
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.2
+ github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1
+ github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1
+ github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1
)
require (
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.55.2 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
- github.com/pulumi/pulumi-docker/sdk/v4 v4.5.1 // indirect
- github.com/pulumi/pulumi-eks/sdk/v2 v2.2.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.67.0 // indirect
+ github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0 // indirect
+ github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 // indirect
+ github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 // indirect
+ github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 // indirect
+ github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 // indirect
)
diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum
index c123d57f9d4d7..91cfe130d9d6c 100644
--- a/test/new-e2e/go.sum
+++ b/test/new-e2e/go.sum
@@ -1,9 +1,11 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/DataDog/agent-payload/v5 v5.0.106 h1:A3dGX+JYoL7OJe2crpxznW7hWxLxhOk/17WbYskRWVk=
github.com/DataDog/agent-payload/v5 v5.0.106/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE=
github.com/DataDog/datadog-api-client-go v1.16.0 h1:5jOZv1m98criCvYTa3qpW8Hzv301nbZX3K9yJtwGyWY=
@@ -12,8 +14,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.19.0 h1:Wvz/63/q39EpVwSH1T8jVyRvP
github.com/DataDog/datadog-api-client-go/v2 v2.19.0/go.mod h1:oD5Lx8Li3oPRa/BSBenkn4i48z+91gwYORF/+6ph71g=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo=
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg=
-github.com/DataDog/test-infra-definitions v0.0.0-20240322160927-3eac4b5bb0c4 h1:dJAaa0h6EgC4q8Mi271rMHVAiQ3OBUR/VlLNQCmJq0Y=
-github.com/DataDog/test-infra-definitions v0.0.0-20240322160927-3eac4b5bb0c4/go.mod h1:KNF9SeKFoqxSSucHpuXQ1QDmpi7HFS9yr5kM2h9ls3c=
+github.com/DataDog/test-infra-definitions v0.0.0-20241104134504-0a48ed729822 h1:bftFzcjeK8zyScbXP8+ifHfRCPQb14xHy3JD0ogVwmo=
+github.com/DataDog/test-infra-definitions v0.0.0-20241104134504-0a48ed729822/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A=
@@ -43,50 +45,52 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w=
-github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo=
-github.com/aws/aws-sdk-go-v2/config v1.27.6 h1:WmoH1aPrxwcqAZTTnETjKr+fuvqzKd4hRrKxQUiuKP4=
-github.com/aws/aws-sdk-go-v2/config v1.27.6/go.mod h1:W9RZFF2pL+OhnUSZsQS/eDMWD8v+R+yWgjj3nSlrXVU=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.6 h1:akhj/nSC6SEx3OmiYGG/7mAyXMem9ZNVVf+DXkikcTk=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.6/go.mod h1:chJZuJ7TkW4kiMwmldOJOEueBoSkUb4ynZS1d9dhygo=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.2 h1:en92G0Z7xlksoOylkUhuBSfJgijC7rHVLRdnIlHEs0E=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.2/go.mod h1:HgtQ/wN5G+8QSlK62lbOtNwQ3wTSByJ4wH2rCkPt+AE=
+github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI=
+github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
+github.com/aws/aws-sdk-go-v2/config v1.27.40 h1:sie4mPBGFOO+Z27+yHzvyN31G20h/bf2xb5mCbpLv2Q=
+github.com/aws/aws-sdk-go-v2/config v1.27.40/go.mod h1:4KW7Aa5tNo+0VHnuLnnE1vPHtwMurlNZNS65IdcewHA=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.38 h1:iM90eRhCeZtlkzCNCG1JysOzJXGYf5rx80aD1lUgNDU=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.38/go.mod h1:TCVYPZeQuLaYNEkf/TVn6k5k/zdVZZ7xH9po548VNNg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19 h1:FKdiFzTxlTRO71p0C7VrLbkkdW8qfMKF5+ej6bTmkT0=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19/go.mod h1:abO3pCj7WLQPTllnSeYImqFfkGrmJV0JovWo/gqT5N0=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1 h1:ToFONzxcc0i0xp9towBF/aVy8qwqGSs3siKoOZiYEMk=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1/go.mod h1:lTBYr5XTnzQ+fG7EdenYlhrDifjdGJ/Lxul24zeuTNU=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.41.1 h1:h1oi77d7nGeM7DvResjebSnhdBVJZefd/eCT+DGjhY4=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.41.1/go.mod h1:1yaOxYWYHZtn7CLrHCJWjzHcazl/EVsRIcNfIsBLg3I=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2 h1:VDQaVwGOokbd3VUbHF+wupiffdrbAZPdQnr5XZMJqrs=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.36.2/go.mod h1:lvUlMghKYmSxSfv0vU7pdU/8jSY+s0zpG8xXhaGKCw0=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4 h1:CTkPGE8fiElvLtYWl/U+Eu5+1fVXiZbJUjyVCRSRgxk=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.47.4/go.mod h1:sMFLFhL27cKYa/eQYZp4asvIwHsnJWrAzTUpy9AQdnU=
github.com/aws/aws-sdk-go-v2/service/eks v1.35.1 h1:qaPIfeZlp+hE5QlEhkTl4zVWvBOaUN/qYgPtSinl9NM=
github.com/aws/aws-sdk-go-v2/service/eks v1.35.1/go.mod h1:palnwFpS00oHlkjnWiwh6HKqtKyJSc90X54t3gKqrVU=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.4 h1:J3Q6N2sTChfYLZSTey3Qeo7n3JSm6RTJDcKev+7Sbus=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.4/go.mod h1:ZopsdDMVg1H03X7BdzpGaufOkuz27RjtKDzioP2U0Hg=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4 h1:jRiWxyuVO8PlkN72wDMVn/haVH4SDCBkUt0Lf/dxd7s=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.2 h1:1oY1AVEisRI4HNuFoLdRUB0hC63ylDAN6Me3MrfclEg=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.2/go.mod h1:KZ03VgvZwSjkT7fOetQ/wF3MZUvYFirlI1H5NklUNsY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.3 h1:7cR4xxS480TI0R6Bd75g9Npdw89VriquvQPlMNmuds4=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.3/go.mod h1:zb72GZ2MvfCX5ynVJ+Mc/NCx7hncbsko4NZm5E+p6J4=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0 h1:FQNWhRuSq8QwW74GtU0MrveNhZbqvHsA4dkA9w8fTDQ=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0/go.mod h1:j/zZ3zmWfGCK91K73YsfHP53BSTLSjL/y6YN39XbBLM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0 h1:AdbiDUgQZmM28rDIZbiSwFxz8+3B94aOXxzs6oH+EA0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0/go.mod h1:uV476Bd80tiDTX4X2redMtagQUg65aU/gzPojSJ4kSI=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0 h1:1NKXS8XfhMM0bg5wVYa/eOH8AM2f6JijugbKEyQFTIg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0/go.mod h1:ph931DUfVfgrhZR7py9olSvHCiRpvaGxNvlWBcXxFds=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0 h1:2dSm7frMrw2tdJ0QvyccQNJyPGaP24dyDgZ6h1QJMGU=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0/go.mod h1:4XSVpw66upN8wND3JZA29eXl2NOZvfFVq7DIP6xvfuQ=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.1 h1:LwoTceR/pj+zzIuVrBrESQ5K8N0T0F3agz+yUXIoVxA=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.1/go.mod h1:N/ISupi87tK6YpOxPDTmF7i6qedc0HYPiUuUY8zU6RI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.3 h1:TkiFkSVX990ryWIMBCT4kPqZEgThQe1xPU/AQXavtvU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.3/go.mod h1:xYNauIUqSuvzlPVb3VB5no/n48YGhmlInD3Uh0Co8Zc=
-github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
-github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 h1:ck/Y8XWNR1gHa4BFkwE3oSu7XDJGwl+8TI7E/RB2EcQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.4/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 h1:4f2/JKYZHAZbQ7koBpZ012bKi32NHPY0m7TDuJgsbug=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 h1:uK6dUUdJtqutK1XO/tmNaQMJiPLCJY/eAeOOmqQ6ygY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.4/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI=
+github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
+github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
@@ -98,8 +102,8 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -165,8 +169,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
-github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
+github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
+github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
@@ -175,11 +179,11 @@ github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
-github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
+github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -194,8 +198,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -243,8 +247,8 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/hcl/v2 v2.20.0 h1:l++cRs/5jQOiKVvqXZm/P1ZEfVXJmvLS9WSVxkaeTb4=
-github.com/hashicorp/hcl/v2 v2.20.0/go.mod h1:WmcD/Ym72MDOOx5F62Ly+leloeu6H7m0pG7VBiU6pQk=
+github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
+github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -327,8 +331,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
-github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
+github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
+github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -366,30 +370,44 @@ github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwa
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435cARxCW6q9gc0S/Yxz7Mkd38pOb0=
github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE=
-github.com/pulumi/esc v0.8.2 h1:+PZg+qAWW9SYrRCHex36QNueAWdxz9b7hi/q/Zb31V0=
-github.com/pulumi/esc v0.8.2/go.mod h1:v5VAPxYDa9DRwvubbzKt4ZYf5y0esWC2ccSp/AT923I=
-github.com/pulumi/pulumi-aws/sdk/v6 v6.25.0 h1:KstWR3AnkXD72ow0xxOzsAkihF+KdzddapHUy0CK2mU=
-github.com/pulumi/pulumi-aws/sdk/v6 v6.25.0/go.mod h1:Ar4SJq3jbKLps3879H5ZvwUt/VnFp/GKbWw1mhjeQek=
-github.com/pulumi/pulumi-awsx/sdk/v2 v2.5.0 h1:sCzgswv1p7G8RUkvUjDgDnrdi7vBRxTtA8Hwtoqabsc=
-github.com/pulumi/pulumi-awsx/sdk/v2 v2.5.0/go.mod h1:lv+hzv8kilWjMNOPcJS8cddJa51d3IdCOPY7cNd2NuU=
-github.com/pulumi/pulumi-command/sdk v0.9.2 h1:2siCFR8pS2sSwXkeWiLrprGEtBL54FsHTzdyl125UuI=
-github.com/pulumi/pulumi-command/sdk v0.9.2/go.mod h1:VeUXTI/iTgKVjRChRJbLRlBVGxAH+uymscfwzBC2VqY=
-github.com/pulumi/pulumi-docker/sdk/v4 v4.5.1 h1:gyuuECcHaPPop7baKfjapJJYnra6s/KdG4QITGu0kAI=
-github.com/pulumi/pulumi-docker/sdk/v4 v4.5.1/go.mod h1:BL+XtKTgkbtt03wA9SOQWyGjl4cIA7BjSHFjvFY+f9U=
-github.com/pulumi/pulumi-eks/sdk/v2 v2.2.1 h1:hVRA7WcxNhnJkfVrd45DTMNPhY26OUABVQCpjZMugMA=
-github.com/pulumi/pulumi-eks/sdk/v2 v2.2.1/go.mod h1:OmbVihWsmsvmn3dr13N9C5cGS3Mos7HWF/R30cx8xtw=
-github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.9.0 h1:Rh46xPvAnXc+v9GV6k9k3+MB3zv4n6izGChughLdqbI=
-github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.9.0/go.mod h1:ACRn9pxZG+syE7hstPKcPt5k98/r6ddUrv1uZOrIyTA=
-github.com/pulumi/pulumi-libvirt/sdk v0.4.4 h1:lJ8YerR7js6f8Gr6HeBOv44evbH44lkWo1RpjJVpe8M=
-github.com/pulumi/pulumi-libvirt/sdk v0.4.4/go.mod h1:lmskpjq1e1z2QwPrk9RyMS2SuAvPhG9QeuCQ3iCygNg=
-github.com/pulumi/pulumi-random/sdk/v4 v4.16.0 h1:H6gGA1hnprPB7SWC11giI93tVRxuSxeAteIuqtr6GHk=
-github.com/pulumi/pulumi-random/sdk/v4 v4.16.0/go.mod h1:poNUvMquwCDb7AqxqBBWcZEn6ADhoDPml2j43wZtzkU=
+github.com/pulumi/esc v0.10.0 h1:jzBKzkLVW0mePeanDRfqSQoCJ5yrkux0jIwAkUxpRKE=
+github.com/pulumi/esc v0.10.0/go.mod h1:2Bfa+FWj/xl8CKqRTWbWgDX0SOD4opdQgvYSURTGK2c=
+github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1 h1:wA38Ep4sEphX+3YGwFfaxRHs7NQv8dNObFepX6jaRa4=
+github.com/pulumi/pulumi-aws/sdk/v6 v6.56.1/go.mod h1:m/ejZ2INurqq/ncDjJfgC1Ff/lnbt0J/uO33BnPVots=
+github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1 h1:6082hB+ILpPB/0V5F+LTmHbX1BO54tCVOQCVOL/FYI4=
+github.com/pulumi/pulumi-awsx/sdk/v2 v2.16.1/go.mod h1:z2bnBPHNYfk72IW1P01H9qikBtBSBhCwi3QpH6Y/38Q=
+github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0 h1:mgmmbFEoc1YOu81K9Bl/MVWE8cGloEdiCeIw394vXcM=
+github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.67.0/go.mod h1:WmvulRFoc+dOk/el9y6u7z3CvA+yljL8HJXajmvZTYo=
+github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0 h1:MFOd6X9FPlixzriy14fBHv7pFCCh/mu1pwHtSSjqfJ4=
+github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.56.0/go.mod h1:453Ff5wNscroYfq+zxME7Nbt7HdZv+dh0zLZwLyGBws=
+github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0 h1:jvruQQSO1ESk7APFQ3mAge7C9SWKU9nbBHrilcyeSGU=
+github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.67.0/go.mod h1:d5nmekK1mrjM9Xo/JGGVlAs7mqqftBo3DmKji+1zbmw=
+github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.67.0 h1:r26Xl6FdOJnbLs1ny9ekuRjFxAocZK8jS8SLrgXKEFE=
+github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.67.0/go.mod h1:8yXZtmHe2Zet5pb8gZ7D730d0VAm4kYUdwCj7sjhz6g=
+github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0 h1:FgfXLypiQ/DKWRPQpyNaftXcGl5HVgA93msBZTQ6Ddk=
+github.com/pulumi/pulumi-azure-native-sdk/v2 v2.67.0/go.mod h1:0y4wJUCX1eA3ZSn0jJIRXtHeJA7qgbPfkrR9qvj+5D4=
+github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0=
+github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA=
+github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5 h1:7OjAfgLz5PAy95ynbgPAlWls5WBe4I/QW/61TdPWRlQ=
+github.com/pulumi/pulumi-docker/sdk/v4 v4.5.5/go.mod h1:XZKLFXbw13olxuztlWnmVUPYZp2a+BqzqhuMl0j/Ow8=
+github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8 h1:NeCKFxyOLpAaG4pJDk7+ewnCuV2IbXR7PggYSNujOno=
+github.com/pulumi/pulumi-eks/sdk/v2 v2.7.8/go.mod h1:ARGNnIZENIpDUVSX21JEQJKrESj/0u0r0iT61rpb86I=
+github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1 h1:PUH/sUbJmBmHjNFNthJ/dW2+riFuJV0FhrGAwuUuRIg=
+github.com/pulumi/pulumi-gcp/sdk/v6 v6.67.1/go.mod h1:OmZeji3dNMwB1qldAlaQfcfJPc2BaZyweVGH7Ej4SJg=
+github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 h1:21oSj+TKlKTzQcxN9Hik7iSNNHPUQXN4s3itOnahy/w=
+github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0/go.mod h1:YaEZms1NgXFqGhObKVofcAeWXu2V+3t/BAXdHQZq7fU=
+github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1 h1:VDX+hu+qK3fbf2FodgG5kfh2h1bHK0FKirW1YqKWkRc=
+github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.17.1/go.mod h1:e69ohZtUePLLYNLXYgiOWp0FvRGg6ya/3fsq3o00nN0=
+github.com/pulumi/pulumi-libvirt/sdk v0.4.7 h1:/BBnqqx/Gbg2vINvJxXIVb58THXzw2lSqFqxlRSXH9M=
+github.com/pulumi/pulumi-libvirt/sdk v0.4.7/go.mod h1:VKvjhAm1sGtzKZruYwIhgascabEx7+oVVRCoxp/cPi4=
+github.com/pulumi/pulumi-random/sdk/v4 v4.16.6 h1:M9BSF13bQxj74C61nBTVITrsgT6oRR6cgudsKz7WOFU=
+github.com/pulumi/pulumi-random/sdk/v4 v4.16.6/go.mod h1:l5ew7S/G1GspPLH9KeWXqxQ4ZmS2hh2sEMv3bW9M3yc=
github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1 h1:tXemWrzeVTqG8zq6hBdv1TdPFXjgZ+dob63a/6GlF1o=
github.com/pulumi/pulumi-tls/sdk/v4 v4.11.1/go.mod h1:hODo3iEmmXDFOXqPK+V+vwI0a3Ww7BLjs5Tgamp86Ng=
-github.com/pulumi/pulumi/sdk/v3 v3.108.1 h1:5idjc3JmzToYVizRPbFyjJ5UU4AbExd04pcSP9AhPEc=
-github.com/pulumi/pulumi/sdk/v3 v3.108.1/go.mod h1:5A6GHUwAJlRY1SSLZh84aDIbsBShcrfcmHzI50ecSBg=
-github.com/pulumiverse/pulumi-time/sdk v0.0.0-20231010123146-089d7304da13 h1:4U7DFIlSggj/4iLbis2Bckayed+OhaYKE7bncZwQCYI=
-github.com/pulumiverse/pulumi-time/sdk v0.0.0-20231010123146-089d7304da13/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg=
+github.com/pulumi/pulumi/sdk/v3 v3.137.0 h1:bxhYpOY7Z4xt+VmezEpHuhjpOekkaMqOjzxFg/1OhCw=
+github.com/pulumi/pulumi/sdk/v3 v3.137.0/go.mod h1:PvKsX88co8XuwuPdzolMvew5lZV+4JmZfkeSjj7A6dI=
+github.com/pulumiverse/pulumi-time/sdk v0.1.0 h1:xfi9HKDgV+GgDxQ23oSv9KxC3DQqViGTcMrJICRgJv0=
+github.com/pulumiverse/pulumi-time/sdk v0.1.0/go.mod h1:NUa1zA74DF002WrM6iF111A6UjX9knPpXufVRvBwNyg=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
@@ -401,19 +419,19 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
-github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
-github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
+github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
+github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
-github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
-github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
@@ -440,8 +458,6 @@ github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqa
github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
-github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68=
-github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7/go.mod h1:UxoP3EypF8JfGEjAII8jx1q8rQyDnX8qdTCs/UQBVIE=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
@@ -457,8 +473,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zclconf/go-cty v1.14.3 h1:1JXy1XroaGrzZuG6X9dt7HL6s9AwbY+l4UNL8o5B6ho=
-github.com/zclconf/go-cty v1.14.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
+github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes=
github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
@@ -473,11 +489,11 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM=
+golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -488,8 +504,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -507,11 +523,11 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -519,8 +535,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -546,8 +562,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -556,8 +572,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -567,10 +583,10 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -584,8 +600,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -597,13 +613,13 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 h1:IR+hp6ypxjH24bkMfEJ0yHR21+gwPWdV+/IBrPQyn3k=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -614,8 +630,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
+google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
diff --git a/test/new-e2e/pkg/components/docker_agent.go b/test/new-e2e/pkg/components/docker_agent.go
index 6d9bd8bf7190e..deacbdab7d9ca 100644
--- a/test/new-e2e/pkg/components/docker_agent.go
+++ b/test/new-e2e/pkg/components/docker_agent.go
@@ -6,7 +6,10 @@
package components
import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
)
@@ -16,5 +19,14 @@ type DockerAgent struct {
agent.DockerAgentOutput
// Client cannot be initialized inline as it requires other information to create client
- Client agentclient.Agent
+ Client agentclient.Agent
+ ClientOptions []agentclientparams.Option
+}
+
+var _ e2e.Initializable = (*DockerAgent)(nil)
+
+// Init is called by e2e test Suite after the component is provisioned.
+func (a *DockerAgent) Init(ctx e2e.Context) (err error) {
+ a.Client, err = client.NewDockerAgentClient(ctx, a.DockerAgentOutput, a.ClientOptions...)
+ return err
}
diff --git a/test/new-e2e/pkg/components/ecs_cluster.go b/test/new-e2e/pkg/components/ecs_cluster.go
new file mode 100644
index 0000000000000..688cb8ed545ea
--- /dev/null
+++ b/test/new-e2e/pkg/components/ecs_cluster.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package components
+
+import (
+ "github.com/DataDog/test-infra-definitions/components/ecs"
+)
+
+// ECSCluster is an ECS Cluster
+type ECSCluster struct {
+ ecs.ClusterOutput
+}
diff --git a/test/new-e2e/pkg/components/kubernetes_cluster.go b/test/new-e2e/pkg/components/kubernetes_cluster.go
index 6b3f3b9102587..618fc07e9e57b 100644
--- a/test/new-e2e/pkg/components/kubernetes_cluster.go
+++ b/test/new-e2e/pkg/components/kubernetes_cluster.go
@@ -9,6 +9,8 @@ import (
"time"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
+
"github.com/DataDog/test-infra-definitions/components/kubernetes"
kubeClient "k8s.io/client-go/kubernetes"
@@ -21,7 +23,7 @@ const kubeClientTimeout = 60 * time.Second
type KubernetesCluster struct {
kubernetes.ClusterOutput
- client kubeClient.Interface
+ KubernetesClient *client.KubernetesClient
}
var _ e2e.Initializable = &KubernetesCluster{}
@@ -37,7 +39,7 @@ func (kc *KubernetesCluster) Init(e2e.Context) error {
config.Timeout = kubeClientTimeout
// Create client
- kc.client, err = kubeClient.NewForConfig(config)
+ kc.KubernetesClient, err = client.NewKubernetesClient(config)
if err != nil {
return err
}
@@ -47,5 +49,5 @@ func (kc *KubernetesCluster) Init(e2e.Context) error {
// Client returns the Kubernetes client
func (kc *KubernetesCluster) Client() kubeClient.Interface {
- return kc.client
+ return kc.KubernetesClient.K8sClient
}
diff --git a/test/new-e2e/pkg/components/remotehost.go b/test/new-e2e/pkg/components/remotehost.go
index c70a9595befbd..1bca18704e1f9 100644
--- a/test/new-e2e/pkg/components/remotehost.go
+++ b/test/new-e2e/pkg/components/remotehost.go
@@ -6,243 +6,35 @@
package components
import (
- "context"
- "fmt"
- "io/fs"
- "net"
- "os"
- "strings"
- "time"
-
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/clients"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
osComp "github.com/DataDog/test-infra-definitions/components/os"
"github.com/DataDog/test-infra-definitions/components/remote"
-
- "github.com/stretchr/testify/require"
- "golang.org/x/crypto/ssh"
-)
-
-const (
- // Waiting for only 10s as we expect remote to be ready when provisioning
- sshRetryInterval = 2 * time.Second
- sshMaxRetries = 20
)
// RemoteHost represents a remote host
type RemoteHost struct {
remote.HostOutput
- client *ssh.Client
+ *client.Host
context e2e.Context
}
-var _ e2e.Initializable = &RemoteHost{}
+var _ e2e.Initializable = (*RemoteHost)(nil)
// Init is called by e2e test Suite after the component is provisioned.
-func (h *RemoteHost) Init(ctx e2e.Context) error {
+func (h *RemoteHost) Init(ctx e2e.Context) (err error) {
h.context = ctx
- return h.ReconnectSSH()
-}
-
-// Execute executes a command and returns an error if any.
-func (h *RemoteHost) Execute(command string, options ...ExecuteOption) (string, error) {
- var err error
- var output string
-
- params, err := optional.MakeParams(options...)
- if err != nil {
- return "", err
- }
-
- cmd := h.buildEnvVariables(command, params.EnvVariables)
- output, err = clients.ExecuteCommand(h.client, cmd)
-
- if err != nil && strings.Contains(err.Error(), "failed to create session:") {
- err = h.ReconnectSSH()
- if err != nil {
- return "", err
- }
- output, err = clients.ExecuteCommand(h.client, cmd)
- }
- if err != nil {
- return "", fmt.Errorf("%v: %v", output, err)
- }
-
- return output, nil
-}
-
-// MustExecute executes a command and returns its output.
-func (h *RemoteHost) MustExecute(command string, options ...ExecuteOption) string {
- output, err := h.Execute(command, options...)
- require.NoError(h.context.T(), err)
- return output
-}
-
-// CopyFile copy file to the remote host
-func (h *RemoteHost) CopyFile(src string, dst string) {
- dst = h.convertToForwardSlashOnWindows(dst)
- err := clients.CopyFile(h.client, src, dst)
- require.NoError(h.context.T(), err)
-}
-
-// CopyFolder copy a folder to the remote host
-func (h *RemoteHost) CopyFolder(srcFolder string, dstFolder string) {
- dstFolder = h.convertToForwardSlashOnWindows(dstFolder)
- err := clients.CopyFolder(h.client, srcFolder, dstFolder)
- require.NoError(h.context.T(), err)
-}
-
-// GetFile copy file from the remote host
-func (h *RemoteHost) GetFile(src string, dst string) error {
- src = h.convertToForwardSlashOnWindows(src)
- return clients.GetFile(h.client, src, dst)
-}
-
-// FileExists returns true if the file exists and is a regular file and returns an error if any
-func (h *RemoteHost) FileExists(path string) (bool, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.FileExists(h.client, path)
-}
-
-// ReadFile reads the content of the file, return bytes read and error if any
-func (h *RemoteHost) ReadFile(path string) ([]byte, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.ReadFile(h.client, path)
-}
-
-// WriteFile write content to the file and returns the number of bytes written and error if any
-func (h *RemoteHost) WriteFile(path string, content []byte) (int64, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.WriteFile(h.client, path, content)
-}
-
-// AppendFile append content to the file and returns the number of bytes written and error if any
-func (h *RemoteHost) AppendFile(os, path string, content []byte) (int64, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.AppendFile(h.client, os, path, content)
-}
-
-// ReadDir returns list of directory entries in path
-func (h *RemoteHost) ReadDir(path string) ([]fs.DirEntry, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.ReadDir(h.client, path)
-}
-
-// Lstat returns a FileInfo structure describing path.
-// if path is a symbolic link, the FileInfo structure describes the symbolic link.
-func (h *RemoteHost) Lstat(path string) (fs.FileInfo, error) {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.Lstat(h.client, path)
-}
-
-// MkdirAll creates the specified directory along with any necessary parents.
-// If the path is already a directory, does nothing and returns nil.
-// Otherwise returns an error if any.
-func (h *RemoteHost) MkdirAll(path string) error {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.MkdirAll(h.client, path)
-}
-
-// Remove removes the specified file or directory.
-// Returns an error if file or directory does not exist, or if the directory is not empty.
-func (h *RemoteHost) Remove(path string) error {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.Remove(h.client, path)
-}
-
-// RemoveAll recursively removes all files/folders in the specified directory.
-// Returns an error if the directory does not exist.
-func (h *RemoteHost) RemoveAll(path string) error {
- path = h.convertToForwardSlashOnWindows(path)
- return clients.RemoveAll(h.client, path)
-}
-
-// DialRemotePort creates a connection to port on the remote host.
-func (h *RemoteHost) DialRemotePort(port uint16) (net.Conn, error) {
- // TODO: Use e2e context (see: https://github.com/DataDog/datadog-agent/pull/22261#discussion_r1477912456)
- return h.client.DialContext(context.Background(), "tcp", fmt.Sprintf("127.0.0.1:%d", port))
-}
-
-// ReconnectSSH recreate the SSH connection to the VM. Should be used only after VM reboot to restore the SSH connection.
-// Returns an error if the VM is not reachable after retries.
-func (h *RemoteHost) ReconnectSSH() error {
- h.context.T().Logf("connecting to remote VM at %s@%s", h.Username, h.Address)
-
- if h.client != nil {
- _ = h.client.Close()
- }
-
- var privateSSHKey []byte
- privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "")
- if err != nil {
- return err
- }
-
- privateKeyPassword, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.PrivateKeyPassword, "")
- if err != nil {
- return err
- }
-
- if privateKeyPath != "" {
- privateSSHKey, err = os.ReadFile(privateKeyPath)
- if err != nil {
- return err
- }
- }
-
- h.client, err = clients.GetSSHClient(
- h.Username,
- fmt.Sprintf("%s:%d", h.Address, 22),
- privateSSHKey,
- []byte(privateKeyPassword),
- sshRetryInterval,
- sshMaxRetries,
- )
+ h.Host, err = client.NewHost(ctx, h.HostOutput)
return err
}
-func (h *RemoteHost) buildEnvVariables(command string, envVar EnvVar) string {
- cmd := ""
- if h.OSFamily == osComp.WindowsFamily {
- envVarSave := map[string]string{}
- for envName, envValue := range envVar {
- previousEnvVar, err := h.Execute(fmt.Sprintf("$env:%s", envName))
- if err != nil || previousEnvVar == "" {
- previousEnvVar = "null"
- }
- envVarSave[envName] = previousEnvVar
-
- cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envValue)
- }
- cmd += fmt.Sprintf("%s; ", command)
-
- // Restore env variables
- for envName := range envVar {
- cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envVarSave[envName])
- }
- } else {
- for envName, envValue := range envVar {
- cmd += fmt.Sprintf("%s='%s' ", envName, envValue)
- }
- cmd += command
- }
- return cmd
-}
-
-// convertToForwardSlashOnWindows replaces backslashes in the path with forward slashes for Windows remote hosts.
-// The path is unchanged for non-Windows remote hosts.
-//
-// This is necessary for remote paths because the sftp package only supports forward slashes, regardless of the local OS.
-// The Windows SSH implementation does this conversion, too. Though we have an advantage in that we can check the OSFamily.
-// https://github.com/PowerShell/openssh-portable/blob/59aba65cf2e2f423c09d12ad825c3b32a11f408f/scp.c#L636-L650
-func (h *RemoteHost) convertToForwardSlashOnWindows(path string) string {
+// DownloadAgentLogs downloads the agent logs from the remote host
+func (h *RemoteHost) DownloadAgentLogs(localPath string) error {
+ agentLogsPath := "/var/log/datadog/agent.log"
if h.OSFamily == osComp.WindowsFamily {
- return strings.ReplaceAll(path, "\\", "/")
+ agentLogsPath = "C:/ProgramData/Datadog/Logs/agent.log"
}
- return path
+ return h.Host.GetFile(agentLogsPath, localPath)
}
diff --git a/test/new-e2e/pkg/components/remotehost_agent.go b/test/new-e2e/pkg/components/remotehost_agent.go
index 9081116015c81..b836eda7a2efe 100644
--- a/test/new-e2e/pkg/components/remotehost_agent.go
+++ b/test/new-e2e/pkg/components/remotehost_agent.go
@@ -6,7 +6,10 @@
package components
import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
)
@@ -15,6 +18,14 @@ import (
type RemoteHostAgent struct {
agent.HostAgentOutput
- // Client cannot be initialized inline as it requires other information to create client
- Client agentclient.Agent
+ Client agentclient.Agent
+ ClientOptions []agentclientparams.Option
+}
+
+var _ e2e.Initializable = (*RemoteHostAgent)(nil)
+
+// Init is called by e2e test Suite after the component is provisioned.
+func (a *RemoteHostAgent) Init(ctx e2e.Context) (err error) {
+ a.Client, err = client.NewHostAgentClientWithParams(ctx, a.HostAgentOutput.Host, a.ClientOptions...)
+ return err
}
diff --git a/test/new-e2e/pkg/components/remotehost_docker.go b/test/new-e2e/pkg/components/remotehost_docker.go
new file mode 100644
index 0000000000000..f646949910771
--- /dev/null
+++ b/test/new-e2e/pkg/components/remotehost_docker.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package components
+
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
+ "github.com/DataDog/test-infra-definitions/components/docker"
+)
+
+// RemoteHostDocker represents an Agent running directly on a Host
+type RemoteHostDocker struct {
+ docker.ManagerOutput
+
+ Client *client.Docker
+}
+
+var _ e2e.Initializable = (*RemoteHostDocker)(nil)
+
+// Init is called by e2e test Suite after the component is provisioned.
+func (d *RemoteHostDocker) Init(ctx e2e.Context) (err error) {
+ d.Client, err = client.NewDocker(ctx.T(), d.ManagerOutput)
+ return err
+}
diff --git a/test/new-e2e/pkg/e2e/provisioner.go b/test/new-e2e/pkg/e2e/provisioner.go
index dbcfbd7a1abd3..a537cc7a05ce4 100644
--- a/test/new-e2e/pkg/e2e/provisioner.go
+++ b/test/new-e2e/pkg/e2e/provisioner.go
@@ -10,6 +10,11 @@ import (
"io"
)
+// Diagnosable defines the interface for a diagnosable provider.
+type Diagnosable interface {
+ Diagnose(ctx context.Context, stackName string) (string, error)
+}
+
// Provisioner defines the interface for a provisioner.
type Provisioner interface {
ID() string
diff --git a/test/new-e2e/pkg/e2e/pulumi_provisioner.go b/test/new-e2e/pkg/e2e/pulumi_provisioner.go
index 6ce8a211ca558..ef8aba2b8d295 100644
--- a/test/new-e2e/pkg/e2e/pulumi_provisioner.go
+++ b/test/new-e2e/pkg/e2e/pulumi_provisioner.go
@@ -12,9 +12,10 @@ import (
"io"
"reflect"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra"
- "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
const (
@@ -26,9 +27,10 @@ type PulumiEnvRunFunc[Env any] func(ctx *pulumi.Context, env *Env) error
// PulumiProvisioner is a provisioner based on Pulumi with binding to an environment.
type PulumiProvisioner[Env any] struct {
- id string
- runFunc PulumiEnvRunFunc[Env]
- configMap runner.ConfigMap
+ id string
+ runFunc PulumiEnvRunFunc[Env]
+ configMap runner.ConfigMap
+ diagnoseFunc func(ctx context.Context, stackName string) (string, error)
}
var (
@@ -71,13 +73,13 @@ func (pp *PulumiProvisioner[Env]) ProvisionEnv(ctx context.Context, stackName st
_, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(
ctx,
stackName,
- pp.configMap,
func(ctx *pulumi.Context) error {
return pp.runFunc(ctx, env)
},
- false,
- logger,
+ infra.WithConfigMap(pp.configMap),
+ infra.WithLogWriter(logger),
)
+
if err != nil {
return nil, err
}
@@ -90,7 +92,7 @@ func (pp *PulumiProvisioner[Env]) ProvisionEnv(ctx context.Context, stackName st
}
// Unfortunately we don't have access to Pulumi raw data
- marshalled, err := json.Marshal(value.Value)
+ marshalled, err := json.MarshalIndent(value.Value, "", "\t")
if err != nil {
return nil, fmt.Errorf("unable to marshal output key: %s, err: %w", key, err)
}
@@ -98,9 +100,36 @@ func (pp *PulumiProvisioner[Env]) ProvisionEnv(ctx context.Context, stackName st
resources[key] = marshalled
}
+ _, err = logger.Write([]byte(fmt.Sprintf("Pulumi stack %s successfully provisioned\nResources:\n%v\n\n", stackName, dumpRawResources(resources))))
+ if err != nil {
+ // Log the error but don't fail the provisioning
+ fmt.Printf("Failed to write log: %v\n", err)
+ }
+
return resources, nil
}
+func dumpRawResources(resources RawResources) string {
+ var res string
+ for key, value := range resources {
+ res += fmt.Sprintf("%s: %s\n", key, value)
+ }
+ return res
+}
+
+// Diagnose runs the diagnose function if it is set diagnoseFunc
+func (pp *PulumiProvisioner[Env]) Diagnose(ctx context.Context, stackName string) (string, error) {
+ if pp.diagnoseFunc != nil {
+ return pp.diagnoseFunc(ctx, stackName)
+ }
+ return "", nil
+}
+
+// SetDiagnoseFunc sets the diagnose function.
+func (pp *PulumiProvisioner[Env]) SetDiagnoseFunc(diagnoseFunc func(ctx context.Context, stackName string) (string, error)) {
+ pp.diagnoseFunc = diagnoseFunc
+}
+
// Destroy deletes the Pulumi stack.
func (pp *PulumiProvisioner[Env]) Destroy(ctx context.Context, stackName string, logger io.Writer) error {
return infra.GetStackManager().DeleteStack(ctx, stackName, logger)
diff --git a/test/new-e2e/pkg/e2e/suite.go b/test/new-e2e/pkg/e2e/suite.go
index 43e8c6e8773c1..e9963e0e8c9bd 100644
--- a/test/new-e2e/pkg/e2e/suite.go
+++ b/test/new-e2e/pkg/e2e/suite.go
@@ -41,6 +41,7 @@
//
// Note: By default, the BaseSuite test suite will delete the environment when the test suite finishes (whether it's successful or not).
// During development, it's highly recommended to use the [params.WithDevMode] option to prevent the environment from being deleted.
+// [params.WithDevMode] is automatically enabled when the `E2E_DEV_MODE` environment variable is set to `true`.
//
// # Organizing your tests
//
@@ -145,13 +146,17 @@ import (
"errors"
"fmt"
"reflect"
+ "sync"
"testing"
"time"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
"github.com/DataDog/test-infra-definitions/common/utils"
"github.com/DataDog/test-infra-definitions/components"
+ "gopkg.in/zorkian/go-datadog-api.v2"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra"
"github.com/stretchr/testify/suite"
)
@@ -180,13 +185,20 @@ var _ Suite[any] = &BaseSuite[any]{}
type BaseSuite[Env any] struct {
suite.Suite
- env *Env
- params suiteParams
+ env *Env
+ datadogClient *datadog.Client
+ params suiteParams
originalProvisioners ProvisionerMap
currentProvisioners ProvisionerMap
firstFailTest string
+ startTime time.Time
+ endTime time.Time
+ initOnly bool
+
+ testSessionOutputDir string
+ onceTestSessionOutputDir sync.Once
}
//
@@ -210,7 +222,6 @@ func (bs *BaseSuite[Env]) UpdateEnv(newProvisioners ...Provisioner) {
uniqueIDs[provisioner.ID()] = struct{}{}
targetProvisioners[provisioner.ID()] = provisioner
}
-
if err := bs.reconcileEnv(targetProvisioners); err != nil {
panic(err)
}
@@ -222,11 +233,31 @@ func (bs *BaseSuite[Env]) IsDevMode() bool {
return bs.params.devMode
}
+// StartTime returns the time when test suite started
+func (bs *BaseSuite[Env]) StartTime() time.Time {
+ return bs.startTime
+}
+
+// EndTime returns the time when test suite ended
+func (bs *BaseSuite[Env]) EndTime() time.Time {
+ return bs.endTime
+}
+
+// DatadogClient returns a Datadog client that can be used to send telemtry info to dddev during e2e tests
+func (bs *BaseSuite[Env]) DatadogClient() *datadog.Client {
+ return bs.datadogClient
+}
+
func (bs *BaseSuite[Env]) init(options []SuiteOption, self Suite[Env]) {
for _, o := range options {
o(&bs.params)
}
+ initOnly, err := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.InitOnly, false)
+ if err == nil {
+ bs.initOnly = initOnly
+ }
+
if !runner.GetProfile().AllowDevMode() {
bs.params.devMode = false
}
@@ -284,12 +315,31 @@ func (bs *BaseSuite[Env]) reconcileEnv(targetProvisioners ProvisionerMap) error
}
if err != nil {
+ if diagnosableProvisioner, ok := provisioner.(Diagnosable); ok {
+ stackName, err := infra.GetStackManager().GetPulumiStackName(bs.params.stackName)
+ if err != nil {
+ bs.T().Logf("unable to get stack name for diagnose, err: %v", err)
+ } else {
+ diagnoseResult, diagnoseErr := diagnosableProvisioner.Diagnose(ctx, stackName)
+ if diagnoseErr != nil {
+ bs.T().Logf("WARNING: Diagnose failed: %v", diagnoseErr)
+ } else if diagnoseResult != "" {
+ bs.T().Logf("Diagnose result: %s", diagnoseResult)
+ }
+ }
+
+ }
return fmt.Errorf("your stack '%s' provisioning failed, check logs above. Provisioner was %s, failed with err: %v", bs.params.stackName, id, err)
}
resources.Merge(provisionerResources)
}
+ // When INIT_ONLY is set, we only partially provision the environment so we do not want initialize the environment
+ if bs.initOnly {
+ return nil
+ }
+
// Env is taken as parameter as some fields may have keys set by Env pulumi program.
err = bs.buildEnvFromResources(resources, newEnvFields, newEnvValues)
if err != nil {
@@ -312,6 +362,7 @@ func (bs *BaseSuite[Env]) reconcileEnv(targetProvisioners ProvisionerMap) error
func (bs *BaseSuite[Env]) createEnv() (*Env, []reflect.StructField, []reflect.Value, error) {
var env Env
+
envFields := reflect.VisibleFields(reflect.TypeOf(&env).Elem())
envValue := reflect.ValueOf(&env)
@@ -429,6 +480,7 @@ func (bs *BaseSuite[Env]) providerContext(opTimeout time.Duration) (context.Cont
//
// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite
func (bs *BaseSuite[Env]) SetupSuite() {
+ bs.startTime = time.Now()
// In `SetupSuite` we cannot fail as `TearDownSuite` will not be called otherwise.
// Meaning that stack clean up may not be called.
// We do implement an explicit recover to handle this manuallay.
@@ -447,10 +499,21 @@ func (bs *BaseSuite[Env]) SetupSuite() {
panic(fmt.Errorf("Forward panic in SetupSuite after TearDownSuite, err was: %v", err))
}()
+ // Setup Datadog Client to be used to send telemetry when writing e2e tests
+ apiKey, err := runner.GetProfile().SecretStore().Get(parameters.APIKey)
+ bs.Require().NoError(err)
+ appKey, err := runner.GetProfile().SecretStore().Get(parameters.APPKey)
+ bs.Require().NoError(err)
+ bs.datadogClient = datadog.NewClient(apiKey, appKey)
+
if err := bs.reconcileEnv(bs.originalProvisioners); err != nil {
// `panic()` is required to stop the execution of the test suite. Otherwise `testify.Suite` will keep on running suite tests.
panic(err)
}
+
+ if bs.initOnly {
+ bs.T().Skip("INIT_ONLY is set, skipping tests")
+ }
}
// BeforeTest is executed right before the test starts and receives the suite and test names as input.
@@ -493,10 +556,17 @@ func (bs *BaseSuite[Env]) AfterTest(suiteName, testName string) {
//
// [testify Suite]: https://pkg.go.dev/github.com/stretchr/testify/suite
func (bs *BaseSuite[Env]) TearDownSuite() {
+ bs.endTime = time.Now()
+
if bs.params.devMode {
return
}
+ if bs.initOnly {
+ bs.T().Logf("INIT_ONLY is set, skipping deletion")
+ return
+ }
+
if bs.firstFailTest != "" && bs.params.skipDeleteOnFailure {
bs.Require().FailNow(fmt.Sprintf("%v failed. As SkipDeleteOnFailure feature is enabled the tests after %v were skipped. "+
"The environment of %v was kept.", bs.firstFailTest, bs.firstFailTest, bs.firstFailTest))
@@ -507,12 +577,53 @@ func (bs *BaseSuite[Env]) TearDownSuite() {
defer cancel()
for id, provisioner := range bs.originalProvisioners {
+ // Run provisioner Diagnose before tearing down the stack
+ if diagnosableProvisioner, ok := provisioner.(Diagnosable); ok {
+ stackName, err := infra.GetStackManager().GetPulumiStackName(bs.params.stackName)
+ if err != nil {
+ bs.T().Logf("unable to get stack name for diagnose, err: %v", err)
+ } else {
+ diagnoseResult, diagnoseErr := diagnosableProvisioner.Diagnose(ctx, stackName)
+ if diagnoseErr != nil {
+ bs.T().Logf("WARNING: Diagnose failed: %v", diagnoseErr)
+ } else if diagnoseResult != "" {
+ bs.T().Logf("Diagnose result: %s", diagnoseResult)
+ }
+ }
+ }
+
if err := provisioner.Destroy(ctx, bs.params.stackName, newTestLogger(bs.T())); err != nil {
bs.T().Errorf("unable to delete stack: %s, provisioner %s, err: %v", bs.params.stackName, id, err)
}
}
}
+// GetRootOutputDir returns the root output directory for tests to store output files and artifacts.
+// The directory is created on the first call to this function and reused in future calls.
+//
+// See BaseSuite.CreateTestOutputDir() for a function that returns a directory for the current test.
+//
+// See CreateRootOutputDir() for details on the root directory creation.
+func (bs *BaseSuite[Env]) GetRootOutputDir() (string, error) {
+ var err error
+ bs.onceTestSessionOutputDir.Do(func() {
+ // Store the timestamped directory to be used by all tests in the suite
+ bs.testSessionOutputDir, err = CreateRootOutputDir()
+ })
+ return bs.testSessionOutputDir, err
+}
+
+// CreateTestOutputDir returns an output directory for the current test.
+//
+// See also CreateTestOutputDir()
+func (bs *BaseSuite[Env]) CreateTestOutputDir() (string, error) {
+ root, err := bs.GetRootOutputDir()
+ if err != nil {
+ return "", err
+ }
+ return CreateTestOutputDir(root, bs.T())
+}
+
// Run is a helper function to run a test suite.
// Unfortunately, we cannot use `s Suite[Env]` as Go is not able to match it with a struct
// However it's able to verify the same constraint on T
diff --git a/test/new-e2e/pkg/e2e/suite_test.go b/test/new-e2e/pkg/e2e/suite_test.go
index d05b4cc3c8832..37219945b2389 100644
--- a/test/new-e2e/pkg/e2e/suite_test.go
+++ b/test/new-e2e/pkg/e2e/suite_test.go
@@ -25,7 +25,7 @@ type testTypeOutput struct {
type testTypeWrapper struct {
testTypeOutput
- unrelatedField string //nolint:unused, mimic actual struct to validate reflection code
+ unrelatedField string //nolint:unused // mimic actual struct to validate reflection code
}
var _ Initializable = &testTypeWrapper{}
diff --git a/test/new-e2e/pkg/e2e/suite_utils.go b/test/new-e2e/pkg/e2e/suite_utils.go
index 72c3e6f4a2681..ad7f1e540845a 100644
--- a/test/new-e2e/pkg/e2e/suite_utils.go
+++ b/test/new-e2e/pkg/e2e/suite_utils.go
@@ -5,7 +5,17 @@
package e2e
-import "testing"
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+
+ "testing"
+)
type testLogger struct {
t *testing.T
@@ -16,6 +26,77 @@ func newTestLogger(t *testing.T) testLogger {
}
func (tl testLogger) Write(p []byte) (n int, err error) {
+ tl.t.Helper()
tl.t.Log(string(p))
return len(p), nil
}
+
+// CreateRootOutputDir creates and returns a directory for tests to store output files and artifacts.
+// A timestamp is included in the path to distinguish between multiple runs, and os.MkdirTemp() is
+// used to avoid name collisions between parallel runs.
+//
+// A new directory is created on each call to this function, it is recommended to save this result
+// and use it for all tests in a run. For example see BaseSuite.GetRootOutputDir().
+//
+// See runner.GetProfile().GetOutputDir() for the root output directory selection logic.
+//
+// See CreateTestOutputDir and BaseSuite.CreateTestOutputDir for a function that returns a subdirectory for a specific test.
+func CreateRootOutputDir() (string, error) {
+ outputRoot, err := runner.GetProfile().GetOutputDir()
+ if err != nil {
+ return "", err
+ }
+ // Append timestamp to distinguish between multiple runs
+ // Format: YYYY-MM-DD_HH-MM-SS
+ // Use a custom timestamp format because Windows paths can't contain ':' characters
+ // and we don't need the timezone information.
+ timePart := time.Now().Format("2006-01-02_15-04-05")
+ // create root directory
+ err = os.MkdirAll(outputRoot, 0755)
+ if err != nil {
+ return "", err
+ }
+ // Create final output directory
+ // Use MkdirTemp to avoid name collisions between parallel runs
+ outputRoot, err = os.MkdirTemp(outputRoot, fmt.Sprintf("%s_*", timePart))
+ if err != nil {
+ return "", err
+ }
+ if os.Getenv("CI") == "" {
+ // Create a symlink to the latest run for user convenience
+ // TODO: Is there a standard "ci" vs "local" check?
+ // This code used to be in localProfile.GetOutputDir()
+ latestLink := filepath.Join(filepath.Dir(outputRoot), "latest")
+ // Remove the symlink if it already exists
+ if _, err := os.Lstat(latestLink); err == nil {
+ err = os.Remove(latestLink)
+ if err != nil {
+ return "", err
+ }
+ }
+ err = os.Symlink(outputRoot, latestLink)
+ if err != nil {
+ return "", err
+ }
+ }
+ return outputRoot, nil
+}
+
+// CreateTestOutputDir creates a directory for a specific test that can be used to store output files and artifacts.
+// The test name is used in the directory name, and invalid characters are replaced with underscores.
+//
+// Example:
+// - test name: TestInstallSuite/TestInstall/install_version=7.50.0
+// - output directory: /TestInstallSuite/TestInstall/install_version_7_50_0
+func CreateTestOutputDir(root string, t *testing.T) (string, error) {
+ // https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
+ invalidPathChars := strings.Join([]string{"?", "%", "*", ":", "|", "\"", "<", ">", ".", ",", ";", "="}, "")
+
+ testPart := strings.ReplaceAll(t.Name(), invalidPathChars, "_")
+ path := filepath.Join(root, testPart)
+ err := os.MkdirAll(path, 0755)
+ if err != nil {
+ return "", err
+ }
+ return path, nil
+}
diff --git a/test/new-e2e/pkg/environments/aws/docker/host.go b/test/new-e2e/pkg/environments/aws/docker/host.go
index fd9f24738162c..52d019a59e963 100644
--- a/test/new-e2e/pkg/environments/aws/docker/host.go
+++ b/test/new-e2e/pkg/environments/aws/docker/host.go
@@ -16,6 +16,7 @@ import (
"github.com/DataDog/test-infra-definitions/common/utils"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/dogstatsd"
"github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams"
"github.com/DataDog/test-infra-definitions/components/docker"
"github.com/DataDog/test-infra-definitions/resources/aws"
@@ -38,6 +39,7 @@ type ProvisionerParams struct {
agentOptions []dockeragentparams.Option
fakeintakeOptions []fakeintake.Option
extraConfigParams runner.ConfigMap
+ testingWorkload bool
}
func newProvisionerParams() *ProvisionerParams {
@@ -120,19 +122,35 @@ func WithoutAgent() ProvisionerOption {
}
}
+// WithTestingWorkload enables testing workload
+func WithTestingWorkload() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.testingWorkload = true
+ return nil
+ }
+}
+
+// RunParams contains parameters for the run function
+type RunParams struct {
+ Environment *aws.Environment
+ ProvisionerParams *ProvisionerParams
+}
+
// Run deploys a docker environment given a pulumi.Context
-func Run(ctx *pulumi.Context, env *environments.DockerHost, params *ProvisionerParams) error {
+func Run(ctx *pulumi.Context, env *environments.DockerHost, runParams RunParams) error {
var awsEnv aws.Environment
- var err error
- if env.AwsEnvironment != nil {
- awsEnv = *env.AwsEnvironment
- } else {
+ if runParams.Environment == nil {
+ var err error
awsEnv, err = aws.NewEnvironment(ctx)
if err != nil {
return err
}
+ } else {
+ awsEnv = *runParams.Environment
}
+ params := runParams.ProvisionerParams
+
host, err := ec2.NewVM(awsEnv, params.name, params.vmOptions...)
if err != nil {
return err
@@ -142,12 +160,18 @@ func Run(ctx *pulumi.Context, env *environments.DockerHost, params *ProvisionerP
return err
}
+ // install the ECR credentials helper
+ // required to get pipeline agent images
installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, host)
if err != nil {
return err
}
- manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, host, utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ manager, err := docker.NewManager(&awsEnv, host, utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ if err != nil {
+ return err
+ }
+ err = manager.Export(ctx, &env.Docker.ManagerOutput)
if err != nil {
return err
}
@@ -176,7 +200,11 @@ func Run(ctx *pulumi.Context, env *environments.DockerHost, params *ProvisionerP
// Create Agent if required
if params.agentOptions != nil {
- agent, err := agent.NewDockerAgent(*awsEnv.CommonEnvironment, host, manager, params.agentOptions...)
+ if params.testingWorkload {
+ params.agentOptions = append(params.agentOptions, dockeragentparams.WithExtraComposeManifest(dogstatsd.DockerComposeManifest.Name, dogstatsd.DockerComposeManifest.Content))
+ params.agentOptions = append(params.agentOptions, dockeragentparams.WithEnvironmentVariables(pulumi.StringMap{"HOST_IP": host.Address}))
+ }
+ agent, err := agent.NewDockerAgent(&awsEnv, host, manager, params.agentOptions...)
if err != nil {
return err
}
@@ -199,7 +227,7 @@ func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Do
// We need to build params here to be able to use params.name in the provisioner name
params := GetProvisionerParams(opts...)
provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.DockerHost) error {
- return Run(ctx, env, params)
+ return Run(ctx, env, RunParams{ProvisionerParams: params})
}, params.extraConfigParams)
return provisioner
diff --git a/test/new-e2e/pkg/environments/aws/ecs/ecs.go b/test/new-e2e/pkg/environments/aws/ecs/ecs.go
index da4acebd14331..426b8271f44b0 100644
--- a/test/new-e2e/pkg/environments/aws/ecs/ecs.go
+++ b/test/new-e2e/pkg/environments/aws/ecs/ecs.go
@@ -9,15 +9,23 @@ package ecs
import (
"fmt"
+ "github.com/DataDog/test-infra-definitions/common/config"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ssm"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
- "github.com/DataDog/test-infra-definitions/common/config"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/aspnetsample"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/cpustress"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/dogstatsd"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/nginx"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/prometheus"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/redis"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/tracegen"
"github.com/DataDog/test-infra-definitions/components/datadog/ecsagentparams"
fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake"
+ ecsComp "github.com/DataDog/test-infra-definitions/components/ecs"
"github.com/DataDog/test-infra-definitions/resources/aws"
- "github.com/DataDog/test-infra-definitions/resources/aws/ecs"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs"
"github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
@@ -36,14 +44,14 @@ type ProvisionerParams struct {
name string
agentOptions []ecsagentparams.Option
fakeintakeOptions []fakeintake.Option
+ ecsOptions []ecs.Option
extraConfigParams runner.ConfigMap
- ecsFargate bool
- ecsLinuxECSOptimizedNodeGroup bool
- ecsLinuxECSOptimizedARMNodeGroup bool
- ecsLinuxBottlerocketNodeGroup bool
- ecsWindowsNodeGroup bool
infraShouldDeployFakeintakeWithLB bool
+ testingWorkload bool
+ workloadAppFuncs []WorkloadAppFunc
+ fargateWorkloadAppFuncs []FargateWorkloadAppFunc
+ awsEnv *aws.Environment
}
func newProvisionerParams() *ProvisionerParams {
@@ -52,13 +60,9 @@ func newProvisionerParams() *ProvisionerParams {
name: defaultECS,
agentOptions: []ecsagentparams.Option{},
fakeintakeOptions: []fakeintake.Option{},
+ ecsOptions: []ecs.Option{},
extraConfigParams: runner.ConfigMap{},
- ecsFargate: false,
- ecsLinuxECSOptimizedNodeGroup: false,
- ecsLinuxECSOptimizedARMNodeGroup: false,
- ecsLinuxBottlerocketNodeGroup: false,
- ecsWindowsNodeGroup: false,
infraShouldDeployFakeintakeWithLB: false,
}
}
@@ -100,66 +104,72 @@ func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
}
}
-// WithECSFargateCapacityProvider enable Fargate ECS
-func WithECSFargateCapacityProvider() ProvisionerOption {
+// WithECSOptions sets the options for ECS cluster
+func WithECSOptions(opts ...ecs.Option) ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.ecsFargate = true
+ params.ecsOptions = append(params.ecsOptions, opts...)
return nil
}
}
-// WithECSLinuxECSOptimizedNodeGroup enable aws/ecs/linuxECSOptimizedNodeGroup
-func WithECSLinuxECSOptimizedNodeGroup() ProvisionerOption {
+// WithTestingWorkload deploys testing workloads for nginx, redis, cpustress, dogstatsd, prometheus and tracegen
+func WithTestingWorkload() ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.ecsLinuxECSOptimizedNodeGroup = true
+ params.testingWorkload = true
return nil
}
}
-// WithECSLinuxECSOptimizedARMNodeGroup enable aws/ecs/linuxECSOptimizedARMNodeGroup
-func WithECSLinuxECSOptimizedARMNodeGroup() ProvisionerOption {
+// WithInfraShouldDeployFakeintakeWithLB enable load balancer on Fakeintake
+func WithInfraShouldDeployFakeintakeWithLB() ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.ecsLinuxECSOptimizedARMNodeGroup = true
+ params.infraShouldDeployFakeintakeWithLB = true
return nil
}
}
-// WithECSLinuxBottlerocketNodeGroup enable aws/ecs/linuxBottlerocketNodeGroup
-func WithECSLinuxBottlerocketNodeGroup() ProvisionerOption {
+// WithoutFakeIntake deactivates the creation of the FakeIntake
+func WithoutFakeIntake() ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.ecsLinuxBottlerocketNodeGroup = true
+ params.fakeintakeOptions = nil
return nil
}
}
-// WithECSWindowsNodeGroup enable aws/ecs/windowsLTSCNodeGroup
-func WithECSWindowsNodeGroup() ProvisionerOption {
+// WithoutAgent deactivates the creation of the Docker Agent
+func WithoutAgent() ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.ecsWindowsNodeGroup = true
+ params.agentOptions = nil
return nil
}
}
-// WithInfraShouldDeployFakeintakeWithLB enable load balancer on Fakeintake
-func WithInfraShouldDeployFakeintakeWithLB() ProvisionerOption {
+// WithAwsEnv asks the provisioner to use the given environment, it is created otherwise
+func WithAwsEnv(env *aws.Environment) ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.infraShouldDeployFakeintakeWithLB = true
+ params.awsEnv = env
return nil
}
}
-// WithoutFakeIntake deactivates the creation of the FakeIntake
-func WithoutFakeIntake() ProvisionerOption {
+// WorkloadAppFunc is a function that deploys a workload app to an ECS cluster
+type WorkloadAppFunc func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error)
+
+// WithWorkloadApp adds a workload app to the environment
+func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.fakeintakeOptions = nil
+ params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
return nil
}
}
-// WithoutAgent deactivates the creation of the Docker Agent
-func WithoutAgent() ProvisionerOption {
+// FargateWorkloadAppFunc is a function that deploys a Fargate workload app to an ECS cluster
+type FargateWorkloadAppFunc func(e aws.Environment, clusterArn pulumi.StringInput, apiKeySSMParamName pulumi.StringInput, fakeIntake *fakeintakeComp.Fakeintake) (*ecsComp.Workload, error)
+
+// WithFargateWorkloadApp adds a Fargate workload app to the environment
+func WithFargateWorkloadApp(appFunc FargateWorkloadAppFunc) ProvisionerOption {
return func(params *ProvisionerParams) error {
- params.agentOptions = nil
+ params.fargateWorkloadAppFuncs = append(params.fargateWorkloadAppFuncs, appFunc)
return nil
}
}
@@ -168,80 +178,32 @@ func WithoutAgent() ProvisionerOption {
func Run(ctx *pulumi.Context, env *environments.ECS, params *ProvisionerParams) error {
var awsEnv aws.Environment
var err error
- if env.AwsEnvironment != nil {
- awsEnv = *env.AwsEnvironment
+ if params.awsEnv != nil {
+ awsEnv = *params.awsEnv
} else {
awsEnv, err = aws.NewEnvironment(ctx)
if err != nil {
return err
}
}
- // Create cluster
- ecsCluster, err := ecs.CreateEcsCluster(awsEnv, params.name)
+ clusterParams, err := ecs.NewParams(params.ecsOptions...)
if err != nil {
return err
}
- // Export cluster’s properties
- ctx.Export("ecs-cluster-name", ecsCluster.Name)
- ctx.Export("ecs-cluster-arn", ecsCluster.Arn)
- env.ClusterName = ecsCluster.Name
- env.ClusterArn = ecsCluster.Arn
-
- // Handle capacity providers
- capacityProviders := pulumi.StringArray{}
- if params.ecsFargate {
- capacityProviders = append(capacityProviders, pulumi.String("FARGATE"))
- }
-
- linuxNodeGroupPresent := false
- if params.ecsLinuxECSOptimizedNodeGroup {
- cpName, err := ecs.NewECSOptimizedNodeGroup(awsEnv, ecsCluster.Name, false)
- if err != nil {
- return err
- }
-
- capacityProviders = append(capacityProviders, cpName)
- linuxNodeGroupPresent = true
- }
-
- if params.ecsLinuxECSOptimizedARMNodeGroup {
- cpName, err := ecs.NewECSOptimizedNodeGroup(awsEnv, ecsCluster.Name, true)
- if err != nil {
- return err
- }
-
- capacityProviders = append(capacityProviders, cpName)
- linuxNodeGroupPresent = true
- }
-
- if params.ecsLinuxBottlerocketNodeGroup {
- cpName, err := ecs.NewBottlerocketNodeGroup(awsEnv, ecsCluster.Name)
- if err != nil {
- return err
- }
-
- capacityProviders = append(capacityProviders, cpName)
- linuxNodeGroupPresent = true
- }
-
- if params.ecsWindowsNodeGroup {
- cpName, err := ecs.NewWindowsNodeGroup(awsEnv, ecsCluster.Name)
- if err != nil {
- return err
- }
-
- capacityProviders = append(capacityProviders, cpName)
+ // Create cluster
+ cluster, err := ecs.NewCluster(awsEnv, params.name, params.ecsOptions...)
+ if err != nil {
+ return err
}
-
- // Associate capacity providers
- _, err = ecs.NewClusterCapacityProvider(awsEnv, ctx.Stack(), ecsCluster.Name, capacityProviders)
+ err = cluster.Export(ctx, &env.ECSCluster.ClusterOutput)
if err != nil {
return err
}
var apiKeyParam *ssm.Parameter
var fakeIntake *fakeintakeComp.Fakeintake
+
// Create task and service
if params.agentOptions != nil {
if params.fakeintakeOptions != nil {
@@ -254,12 +216,13 @@ func Run(ctx *pulumi.Context, env *environments.ECS, params *ProvisionerParams)
if fakeIntake, err = fakeintake.NewECSFargateInstance(awsEnv, "ecs", fakeIntakeOptions...); err != nil {
return err
}
- if err := fakeIntake.Export(awsEnv.Ctx, &env.FakeIntake.FakeintakeOutput); err != nil {
+ if err := fakeIntake.Export(awsEnv.Ctx(), &env.FakeIntake.FakeintakeOutput); err != nil {
return err
}
}
+
apiKeyParam, err = ssm.NewParameter(ctx, awsEnv.Namer.ResourceName("agent-apikey"), &ssm.ParameterArgs{
- Name: awsEnv.CommonNamer.DisplayName(1011, pulumi.String("agent-apikey")),
+ Name: awsEnv.CommonNamer().DisplayName(1011, pulumi.String("agent-apikey")),
Type: ssm.ParameterTypeSecureString,
Overwrite: pulumi.Bool(true),
Value: awsEnv.AgentAPIKey(),
@@ -268,16 +231,66 @@ func Run(ctx *pulumi.Context, env *environments.ECS, params *ProvisionerParams)
return err
}
- // Deploy EC2 Agent
- if linuxNodeGroupPresent {
- agentDaemon, err := agent.ECSLinuxDaemonDefinition(awsEnv, "ec2-linux-dd-agent", apiKeyParam.Name, fakeIntake, ecsCluster.Arn, params.agentOptions...)
- if err != nil {
- return err
+ _, err := agent.ECSLinuxDaemonDefinition(awsEnv, "ec2-linux-dd-agent", apiKeyParam.Name, fakeIntake, cluster.ClusterArn, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ // Deploy Fargate Apps
+ if clusterParams.FargateCapacityProvider {
+ for _, fargateAppFunc := range params.fargateWorkloadAppFuncs {
+ _, err := fargateAppFunc(awsEnv, cluster.ClusterArn, apiKeyParam.Name, fakeIntake)
+ if err != nil {
+ return err
+ }
}
+ }
+ }
+
+ if params.testingWorkload {
+ if _, err := nginx.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+
+ if _, err := redis.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+
+ if _, err := cpustress.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+
+ if _, err := dogstatsd.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+
+ if _, err := prometheus.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+
+ if _, err := tracegen.EcsAppDefinition(awsEnv, cluster.ClusterArn); err != nil {
+ return err
+ }
+ }
+
+ if clusterParams.FargateCapacityProvider && params.testingWorkload && params.agentOptions != nil {
- ctx.Export("agent-ec2-linux-task-arn", agentDaemon.TaskDefinition.Arn())
- ctx.Export("agent-ec2-linux-task-family", agentDaemon.TaskDefinition.Family())
- ctx.Export("agent-ec2-linux-task-version", agentDaemon.TaskDefinition.Revision())
+ if _, err := redis.FargateAppDefinition(awsEnv, cluster.ClusterArn, apiKeyParam.Name, fakeIntake); err != nil {
+ return err
+ }
+
+ if _, err = nginx.FargateAppDefinition(awsEnv, cluster.ClusterArn, apiKeyParam.Name, fakeIntake); err != nil {
+ return err
+ }
+
+ if _, err = aspnetsample.FargateAppDefinition(awsEnv, cluster.ClusterArn, apiKeyParam.Name, fakeIntake); err != nil {
+ return err
+ }
+ }
+ for _, appFunc := range params.workloadAppFuncs {
+ _, err := appFunc(awsEnv, cluster.ClusterArn)
+ if err != nil {
+ return err
}
}
diff --git a/test/new-e2e/pkg/environments/aws/host/host.go b/test/new-e2e/pkg/environments/aws/host/host.go
index 91a47332dc9d1..4668905ac83a8 100644
--- a/test/new-e2e/pkg/environments/aws/host/host.go
+++ b/test/new-e2e/pkg/environments/aws/host/host.go
@@ -12,6 +12,7 @@ import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
"github.com/DataDog/test-infra-definitions/common/utils"
@@ -35,22 +36,24 @@ const (
type ProvisionerParams struct {
name string
- instanceOptions []ec2.VMOption
- agentOptions []agentparams.Option
- fakeintakeOptions []fakeintake.Option
- extraConfigParams runner.ConfigMap
- installDocker bool
- installUpdater bool
+ instanceOptions []ec2.VMOption
+ agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
+ fakeintakeOptions []fakeintake.Option
+ extraConfigParams runner.ConfigMap
+ installDocker bool
+ installUpdater bool
}
func newProvisionerParams() *ProvisionerParams {
// We use nil arrays to decide if we should create or not
return &ProvisionerParams{
- name: defaultVMName,
- instanceOptions: []ec2.VMOption{},
- agentOptions: []agentparams.Option{},
- fakeintakeOptions: []fakeintake.Option{},
- extraConfigParams: runner.ConfigMap{},
+ name: defaultVMName,
+ instanceOptions: []ec2.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ extraConfigParams: runner.ConfigMap{},
}
}
@@ -91,6 +94,14 @@ func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
}
}
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
// WithFakeIntakeOptions adds options to the FakeIntake.
func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
return func(params *ProvisionerParams) error {
@@ -157,19 +168,26 @@ func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[env
return Provisioner(mergedOpts...)
}
+// RunParams is a set of parameters for the Run function.
+type RunParams struct {
+ Environment *aws.Environment
+ ProvisionerParams *ProvisionerParams
+}
+
// Run deploys a environment given a pulumi.Context
-func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams) error {
+func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error {
var awsEnv aws.Environment
- var err error
- if env.AwsEnvironment != nil {
- awsEnv = *env.AwsEnvironment
- } else {
+ if runParams.Environment == nil {
+ var err error
awsEnv, err = aws.NewEnvironment(ctx)
if err != nil {
return err
}
+ } else {
+ awsEnv = *runParams.Environment
}
+ params := runParams.ProvisionerParams
host, err := ec2.NewVM(awsEnv, params.name, params.instanceOptions...)
if err != nil {
return err
@@ -180,7 +198,15 @@ func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams)
}
if params.installDocker {
- _, dockerRes, err := docker.NewManager(*awsEnv.CommonEnvironment, host)
+ // install the ECR credentials helper
+ // required to get pipeline agent images or other internally hosted images
+ installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, host)
+ if err != nil {
+ return err
+ }
+
+ dockerManager, err := docker.NewManager(&awsEnv, host, utils.PulumiDependsOn(installEcrCredsHelperCmd))
+
if err != nil {
return err
}
@@ -191,7 +217,7 @@ func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams)
// at the same time.
params.agentOptions = append(params.agentOptions,
agentparams.WithPulumiResourceOptions(
- utils.PulumiDependsOn(dockerRes)))
+ utils.PulumiDependsOn(dockerManager)))
}
}
@@ -223,7 +249,7 @@ func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams)
// Create Agent if required
if params.installUpdater && params.agentOptions != nil {
- updater, err := updater.NewHostUpdater(awsEnv.CommonEnvironment, host, params.agentOptions...)
+ updater, err := updater.NewHostUpdater(&awsEnv, host, params.agentOptions...)
if err != nil {
return err
}
@@ -235,7 +261,8 @@ func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams)
// todo: add agent once updater installs agent on bootstrap
env.Agent = nil
} else if params.agentOptions != nil {
- agent, err := agent.NewHostAgent(awsEnv.CommonEnvironment, host, params.agentOptions...)
+ agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())}))
+ agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...)
if err != nil {
return err
}
@@ -244,6 +271,8 @@ func Run(ctx *pulumi.Context, env *environments.Host, params *ProvisionerParams)
if err != nil {
return err
}
+
+ env.Agent.ClientOptions = params.agentClientOptions
} else {
// Suite inits all fields by default, so we need to explicitly set it to nil
env.Agent = nil
@@ -262,7 +291,7 @@ func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Ho
// We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
// and it's easy to forget about it, leading to hard to debug issues.
params := GetProvisionerParams(opts...)
- return Run(ctx, env, params)
+ return Run(ctx, env, RunParams{ProvisionerParams: params})
}, params.extraConfigParams)
return provisioner
diff --git a/test/new-e2e/pkg/environments/aws/host/windows/host.go b/test/new-e2e/pkg/environments/aws/host/windows/host.go
index d846a5151ed3a..e7f74faeae4c1 100644
--- a/test/new-e2e/pkg/environments/aws/host/windows/host.go
+++ b/test/new-e2e/pkg/environments/aws/host/windows/host.go
@@ -8,9 +8,7 @@ package winawshost
import (
"fmt"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
"github.com/DataDog/test-infra-definitions/components/activedirectory"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
"github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
@@ -19,6 +17,12 @@ import (
"github.com/DataDog/test-infra-definitions/scenarios/aws/ec2"
"github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/components/defender"
)
const (
@@ -32,8 +36,10 @@ type ProvisionerParams struct {
instanceOptions []ec2.VMOption
agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
fakeintakeOptions []fakeintake.Option
activeDirectoryOptions []activedirectory.Option
+ defenderoptions []defender.Option
}
// ProvisionerOption is a provisioner option.
@@ -71,6 +77,14 @@ func WithoutAgent() ProvisionerOption {
}
}
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
// WithFakeIntakeOptions adds options to the FakeIntake.
func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
return func(params *ProvisionerParams) error {
@@ -95,6 +109,14 @@ func WithActiveDirectoryOptions(opts ...activedirectory.Option) ProvisionerOptio
}
}
+// WithDefenderOptions configures Windows Defender on an EC2 VM.
+func WithDefenderOptions(opts ...defender.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.defenderoptions = append(params.defenderoptions, opts...)
+ return nil
+ }
+}
+
// Run deploys a Windows environment given a pulumi.Context
func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *ProvisionerParams) error {
awsEnv, err := aws.NewEnvironment(ctx)
@@ -102,6 +124,8 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner
return err
}
+ env.Environment = &awsEnv
+
// Make sure to override any OS other than Windows
// TODO: Make the Windows version configurable
params.instanceOptions = append(params.instanceOptions, ec2.WithOS(os.WindowsDefault))
@@ -115,8 +139,19 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner
return err
}
+ if params.defenderoptions != nil {
+ defender, err := defender.NewDefender(awsEnv.CommonEnvironment, host, params.defenderoptions...)
+ if err != nil {
+ return err
+ }
+ // Active Directory setup needs to happen after Windows Defender setup
+ params.activeDirectoryOptions = append(params.activeDirectoryOptions,
+ activedirectory.WithPulumiResourceOptions(
+ pulumi.DependsOn(defender.Resources)))
+ }
+
if params.activeDirectoryOptions != nil {
- activeDirectoryComp, activeDirectoryResources, err := activedirectory.NewActiveDirectory(ctx, awsEnv.CommonEnvironment, host, params.activeDirectoryOptions...)
+ activeDirectoryComp, activeDirectoryResources, err := activedirectory.NewActiveDirectory(ctx, &awsEnv, host, params.activeDirectoryOptions...)
if err != nil {
return err
}
@@ -157,7 +192,8 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner
}
if params.agentOptions != nil {
- agent, err := agent.NewHostAgent(awsEnv.CommonEnvironment, host, params.agentOptions...)
+ agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())}))
+ agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...)
if err != nil {
return err
}
@@ -165,6 +201,7 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner
if err != nil {
return err
}
+ env.Agent.ClientOptions = params.agentClientOptions
} else {
env.Agent = nil
}
@@ -174,10 +211,13 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner
func getProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
params := &ProvisionerParams{
- name: "",
- instanceOptions: []ec2.VMOption{},
- agentOptions: []agentparams.Option{},
- fakeintakeOptions: []fakeintake.Option{},
+ name: defaultVMName,
+ instanceOptions: []ec2.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ // Disable Windows Defender on VMs by default
+ defenderoptions: []defender.Option{defender.WithDefenderDisabled()},
}
err := optional.ApplyOptions(params, opts)
if err != nil {
@@ -203,7 +243,7 @@ func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Wi
// ProvisionerNoAgent wraps Provisioner with hardcoded WithoutAgent options.
func ProvisionerNoAgent(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.WindowsHost] {
- mergedOpts := make([]ProvisionerOption, 0, len(opts)+2)
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
mergedOpts = append(mergedOpts, opts...)
mergedOpts = append(mergedOpts, WithoutAgent())
diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/eks.go b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go
new file mode 100644
index 0000000000000..eef9a51518011
--- /dev/null
+++ b/test/new-e2e/pkg/environments/aws/kubernetes/eks.go
@@ -0,0 +1,182 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package awskubernetes contains the provisioner for the Kubernetes based environments
+package awskubernetes
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/DataDog/test-infra-definitions/common/utils"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/cpustress"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/dogstatsd"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/mutatedbyadmissioncontroller"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/nginx"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/prometheus"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/redis"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/tracegen"
+ dogstatsdstandalone "github.com/DataDog/test-infra-definitions/components/datadog/dogstatsd-standalone"
+ fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ "github.com/DataDog/test-infra-definitions/resources/aws"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/eks"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+func eksDiagnoseFunc(ctx context.Context, stackName string) (string, error) {
+ dumpResult, err := dumpEKSClusterState(ctx, stackName)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("Dumping EKS cluster state:\n%s", dumpResult), nil
+}
+
+// EKSProvisioner creates a new provisioner
+func EKSProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ return EKSRunFunc(ctx, env, params)
+ }, params.extraConfigParams)
+
+ provisioner.SetDiagnoseFunc(eksDiagnoseFunc)
+
+ return provisioner
+}
+
+// EKSRunFunc deploys a EKS environment given a pulumi.Context
+func EKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error {
+ var awsEnv aws.Environment
+ var err error
+ if params.awsEnv != nil {
+ awsEnv = *params.awsEnv
+ } else {
+ awsEnv, err = aws.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ cluster, err := eks.NewCluster(awsEnv, params.name, params.eksOptions...)
+ if err != nil {
+ return err
+ }
+
+ if err := cluster.Export(ctx, &env.KubernetesCluster.ClusterOutput); err != nil {
+ return err
+ }
+
+ if awsEnv.InitOnly() {
+ return nil
+ }
+
+ var fakeIntake *fakeintakeComp.Fakeintake
+ if params.fakeintakeOptions != nil {
+ fakeIntakeOptions := []fakeintake.Option{
+ fakeintake.WithCPU(1024),
+ fakeintake.WithMemory(6144),
+ }
+ if awsEnv.GetCommonEnvironment().InfraShouldDeployFakeintakeWithLB() {
+ fakeIntakeOptions = append(fakeIntakeOptions, fakeintake.WithLoadBalancer())
+ }
+
+ if fakeIntake, err = fakeintake.NewECSFargateInstance(awsEnv, "ecs", fakeIntakeOptions...); err != nil {
+ return err
+ }
+ if err := fakeIntake.Export(awsEnv.Ctx(), &env.FakeIntake.FakeintakeOutput); err != nil {
+ return err
+ }
+ } else {
+ env.FakeIntake = nil
+ }
+
+ workloadWithCRDDeps := []pulumi.Resource{cluster}
+ // Deploy the agent
+ if params.agentOptions != nil {
+ params.agentOptions = append(params.agentOptions, kubernetesagentparams.WithPulumiResourceOptions(utils.PulumiDependsOn(cluster)), kubernetesagentparams.WithFakeintake(fakeIntake))
+ kubernetesAgent, err := helm.NewKubernetesAgent(&awsEnv, "eks", cluster.KubeProvider, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+ err = kubernetesAgent.Export(ctx, &env.Agent.KubernetesAgentOutput)
+ if err != nil {
+ return err
+ }
+ workloadWithCRDDeps = append(workloadWithCRDDeps, kubernetesAgent)
+ } else {
+ env.Agent = nil
+ }
+ // Deploy standalone dogstatsd
+ if params.deployDogstatsd {
+ if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "dogstatsd-standalone", fakeIntake, true, ""); err != nil {
+ return err
+ }
+ }
+
+ if params.deployTestWorkload {
+
+ if _, err := cpustress.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-cpustress", utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ // dogstatsd clients that report to the Agent
+ if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket", utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ // dogstatsd clients that report to the dogstatsd standalone deployment
+ if _, err := dogstatsd.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket, utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ if _, err := tracegen.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-tracegen", utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ if _, err := prometheus.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-prometheus", utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-mutated", "workload-mutated-lib-injection", utils.PulumiDependsOn(cluster)); err != nil {
+ return err
+ }
+
+ // These resources cannot be deployed if the Agent is not installed, it requires some CRDs provided by the Helm chart
+ if params.agentOptions != nil {
+ if _, err := nginx.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-nginx", "", true, utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil {
+ return err
+ }
+
+ if _, err := redis.K8sAppDefinition(&awsEnv, cluster.KubeProvider, "workload-redis", true, utils.PulumiDependsOn(workloadWithCRDDeps...)); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Deploy workloads
+ for _, appFunc := range params.workloadAppFuncs {
+ _, err := appFunc(&awsEnv, cluster.KubeProvider)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
index d4b4f4c8bd521..58fce4c5a9132 100644
--- a/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
+++ b/test/new-e2e/pkg/environments/aws/kubernetes/kind.go
@@ -7,15 +7,25 @@
package awskubernetes
import (
+ "context"
"fmt"
+ "github.com/DataDog/test-infra-definitions/common/utils"
+
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
- "github.com/DataDog/test-infra-definitions/common/config"
- "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/cpustress"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/dogstatsd"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/mutatedbyadmissioncontroller"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/nginx"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/prometheus"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/redis"
+ "github.com/DataDog/test-infra-definitions/components/datadog/apps/tracegen"
+ dogstatsdstandalone "github.com/DataDog/test-infra-definitions/components/datadog/dogstatsd-standalone"
+ fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake"
"github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes"
"github.com/DataDog/test-infra-definitions/resources/aws"
@@ -32,99 +42,16 @@ const (
defaultVMName = "kind"
)
-// ProvisionerParams contains all the parameters needed to create the environment
-type ProvisionerParams struct {
- name string
- vmOptions []ec2.VMOption
- agentOptions []kubernetesagentparams.Option
- fakeintakeOptions []fakeintake.Option
- extraConfigParams runner.ConfigMap
- workloadAppFuncs []WorkloadAppFunc
-}
-
-func newProvisionerParams() *ProvisionerParams {
- return &ProvisionerParams{
- name: defaultVMName,
- vmOptions: []ec2.VMOption{},
- agentOptions: []kubernetesagentparams.Option{},
- fakeintakeOptions: []fakeintake.Option{},
- extraConfigParams: runner.ConfigMap{},
- workloadAppFuncs: []WorkloadAppFunc{},
- }
-}
-
-// ProvisionerOption is a function that modifies the ProvisionerParams
-type ProvisionerOption func(*ProvisionerParams) error
-
-// WithName sets the name of the provisioner
-func WithName(name string) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.name = name
- return nil
- }
-}
-
-// WithEC2VMOptions adds options to the EC2 VM
-func WithEC2VMOptions(opts ...ec2.VMOption) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.vmOptions = opts
- return nil
- }
-}
-
-// WithAgentOptions adds options to the agent
-func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.agentOptions = opts
- return nil
- }
-}
-
-// WithFakeIntakeOptions adds options to the fake intake
-func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.fakeintakeOptions = opts
- return nil
- }
-}
-
-// WithoutFakeIntake removes the fake intake
-func WithoutFakeIntake() ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.fakeintakeOptions = nil
- return nil
- }
-}
-
-// WithoutAgent removes the agent
-func WithoutAgent() ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.agentOptions = nil
- return nil
- }
-}
-
-// WithExtraConfigParams adds extra config parameters to the environment
-func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.extraConfigParams = configMap
- return nil
- }
-}
-
-// WorkloadAppFunc is a function that deploys a workload app to a kube provider
-type WorkloadAppFunc func(e config.CommonEnvironment, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)
-
-// WithWorkloadApp adds a workload app to the environment
-func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
- return func(params *ProvisionerParams) error {
- params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
- return nil
+func kindDiagnoseFunc(ctx context.Context, stackName string) (string, error) {
+ dumpResult, err := dumpKindClusterState(ctx, stackName)
+ if err != nil {
+ return "", err
}
+ return fmt.Sprintf("Dumping Kind cluster state:\n%s", dumpResult), nil
}
-// Provisioner creates a new provisioner
-func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
+// KindProvisioner creates a new provisioner
+func KindProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
// We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
// and it's easy to forget about it, leading to hard to debug issues.
params := newProvisionerParams()
@@ -139,6 +66,8 @@ func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Ku
return KindRunFunc(ctx, env, params)
}, params.extraConfigParams)
+ provisioner.SetDiagnoseFunc(kindDiagnoseFunc)
+
return provisioner
}
@@ -154,10 +83,16 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
return err
}
- kindCluster, err := kubeComp.NewKindCluster(*awsEnv.CommonEnvironment, host, awsEnv.CommonNamer.ResourceName("kind"), params.name, awsEnv.KubernetesVersion())
+ installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, host)
if err != nil {
return err
}
+
+ kindCluster, err := kubeComp.NewKindCluster(&awsEnv, host, awsEnv.CommonNamer().ResourceName("kind"), params.name, awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
+ if err != nil {
+ return err
+ }
+
err = kindCluster.Export(ctx, &env.KubernetesCluster.ClusterOutput)
if err != nil {
return err
@@ -171,10 +106,11 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
return err
}
+ var fakeIntake *fakeintakeComp.Fakeintake
if params.fakeintakeOptions != nil {
fakeintakeOpts := []fakeintake.Option{fakeintake.WithLoadBalancer()}
params.fakeintakeOptions = append(fakeintakeOpts, params.fakeintakeOptions...)
- fakeIntake, err := fakeintake.NewECSFargateInstance(awsEnv, params.name, params.fakeintakeOptions...)
+ fakeIntake, err = fakeintake.NewECSFargateInstance(awsEnv, params.name, params.fakeintakeOptions...)
if err != nil {
return err
}
@@ -191,6 +127,7 @@ func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *Prov
env.FakeIntake = nil
}
+ var dependsOnCrd []pulumi.Resource
if params.agentOptions != nil {
kindClusterName := ctx.Stack()
helmValues := fmt.Sprintf(`
@@ -204,7 +141,7 @@ agents:
newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues)}
params.agentOptions = append(newOpts, params.agentOptions...)
- agent, err := agent.NewKubernetesAgent(*awsEnv.CommonEnvironment, kindClusterName, kubeProvider, params.agentOptions...)
+ agent, err := helm.NewKubernetesAgent(&awsEnv, kindClusterName, kubeProvider, params.agentOptions...)
if err != nil {
return err
}
@@ -212,13 +149,58 @@ agents:
if err != nil {
return err
}
-
+ dependsOnCrd = append(dependsOnCrd, agent)
} else {
env.Agent = nil
}
+ if params.deployDogstatsd {
+ if _, err := dogstatsdstandalone.K8sAppDefinition(&awsEnv, kubeProvider, "dogstatsd-standalone", fakeIntake, false, ctx.Stack()); err != nil {
+ return err
+ }
+ }
+
+ // Deploy testing workload
+ if params.deployTestWorkload {
+ // dogstatsd clients that report to the Agent
+ if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd", 8125, "/var/run/datadog/dsd.socket"); err != nil {
+ return err
+ }
+
+ // dogstatsd clients that report to the dogstatsd standalone deployment
+ if _, err := dogstatsd.K8sAppDefinition(&awsEnv, kubeProvider, "workload-dogstatsd-standalone", dogstatsdstandalone.HostPort, dogstatsdstandalone.Socket); err != nil {
+ return err
+ }
+
+ if _, err := tracegen.K8sAppDefinition(&awsEnv, kubeProvider, "workload-tracegen"); err != nil {
+ return err
+ }
+
+ if _, err := prometheus.K8sAppDefinition(&awsEnv, kubeProvider, "workload-prometheus"); err != nil {
+ return err
+ }
+
+ if _, err := mutatedbyadmissioncontroller.K8sAppDefinition(&awsEnv, kubeProvider, "workload-mutated", "workload-mutated-lib-injection"); err != nil {
+ return err
+ }
+
+ // These workloads can be deployed only if the agent is installed, they rely on CRDs installed by Agent helm chart
+ if params.agentOptions != nil {
+ if _, err := nginx.K8sAppDefinition(&awsEnv, kubeProvider, "workload-nginx", "", true, utils.PulumiDependsOn(dependsOnCrd...)); err != nil {
+ return err
+ }
+
+ if _, err := redis.K8sAppDefinition(&awsEnv, kubeProvider, "workload-redis", true, utils.PulumiDependsOn(dependsOnCrd...)); err != nil {
+ return err
+ }
+
+ if _, err := cpustress.K8sAppDefinition(&awsEnv, kubeProvider, "workload-cpustress", utils.PulumiDependsOn(dependsOnCrd...)); err != nil {
+ return err
+ }
+ }
+ }
for _, appFunc := range params.workloadAppFuncs {
- _, err := appFunc(*awsEnv.CommonEnvironment, kubeProvider)
+ _, err := appFunc(&awsEnv, kubeProvider)
if err != nil {
return err
}
diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/kubernetes_dump.go b/test/new-e2e/pkg/environments/aws/kubernetes/kubernetes_dump.go
new file mode 100644
index 0000000000000..b786008af1d3b
--- /dev/null
+++ b/test/new-e2e/pkg/environments/aws/kubernetes/kubernetes_dump.go
@@ -0,0 +1,285 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+package awskubernetes
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "os/user"
+ "strings"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/pointer"
+ awsconfig "github.com/aws/aws-sdk-go-v2/config"
+ awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2"
+ awsec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+ awseks "github.com/aws/aws-sdk-go-v2/service/eks"
+ awsekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/cli-runtime/pkg/genericiooptions"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ kubectlget "k8s.io/kubectl/pkg/cmd/get"
+ kubectlutil "k8s.io/kubectl/pkg/cmd/util"
+)
+
+func dumpEKSClusterState(ctx context.Context, name string) (ret string, err error) {
+ var out strings.Builder
+ defer func() { ret = out.String() }()
+
+ cfg, err := awsconfig.LoadDefaultConfig(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to load AWS config: %v", err)
+ }
+
+ client := awseks.NewFromConfig(cfg)
+
+ clusterDescription, err := client.DescribeCluster(ctx, &awseks.DescribeClusterInput{
+ Name: &name,
+ })
+ if err != nil {
+ return "", fmt.Errorf("failed to describe cluster %s: %v", name, err)
+ }
+
+ cluster := clusterDescription.Cluster
+ if cluster.Status != awsekstypes.ClusterStatusActive {
+ return "", fmt.Errorf("EKS cluster %s is not in active state. Current status: %s", name, cluster.Status)
+ }
+
+ kubeconfig := clientcmdapi.NewConfig()
+ kubeconfig.Clusters[name] = &clientcmdapi.Cluster{
+ Server: *cluster.Endpoint,
+ }
+ if kubeconfig.Clusters[name].CertificateAuthorityData, err = base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data); err != nil {
+ return "", fmt.Errorf("failed to decode certificate authority: %v", err)
+ }
+ kubeconfig.AuthInfos[name] = &clientcmdapi.AuthInfo{
+ Exec: &clientcmdapi.ExecConfig{
+ APIVersion: "client.authentication.k8s.io/v1beta1",
+ Command: "aws",
+ Args: []string{
+ "--region",
+ cfg.Region,
+ "eks",
+ "get-token",
+ "--cluster-name",
+ name,
+ "--output",
+ "json",
+ },
+ },
+ }
+ kubeconfig.Contexts[name] = &clientcmdapi.Context{
+ Cluster: name,
+ AuthInfo: name,
+ }
+ kubeconfig.CurrentContext = name
+
+ err = dumpK8sClusterState(ctx, kubeconfig, &out)
+ if err != nil {
+ return ret, fmt.Errorf("failed to dump cluster state: %v", err)
+ }
+
+ return
+}
+
+func dumpKindClusterState(ctx context.Context, name string) (ret string, err error) {
+ var out strings.Builder
+ defer func() { ret = out.String() }()
+
+ cfg, err := awsconfig.LoadDefaultConfig(ctx)
+ if err != nil {
+ return "", fmt.Errorf("failed to load AWS config: %v", err)
+ }
+ ec2Client := awsec2.NewFromConfig(cfg)
+
+ user, _ := user.Current()
+ instancesDescription, err := ec2Client.DescribeInstances(ctx, &awsec2.DescribeInstancesInput{
+ Filters: []awsec2types.Filter{
+ {
+ Name: pointer.Ptr("tag:managed-by"),
+ Values: []string{"pulumi"},
+ },
+ {
+ Name: pointer.Ptr("tag:username"),
+ Values: []string{user.Username},
+ },
+ {
+ Name: pointer.Ptr("tag:Name"),
+ Values: []string{name + "-aws-kind"},
+ },
+ },
+ })
+ if err != nil {
+ return ret, fmt.Errorf("failed to describe instances: %v", err)
+ }
+
+ // instancesDescription.Reservations = []
+ if instancesDescription == nil || (len(instancesDescription.Reservations) > 0 && len(instancesDescription.Reservations[0].Instances) != 1) {
+ return ret, fmt.Errorf("did not find exactly one instance for cluster %s", name)
+ }
+
+ instanceIP := instancesDescription.Reservations[0].Instances[0].PrivateIpAddress
+
+ auth := []ssh.AuthMethod{}
+
+ if sshAgentSocket, found := os.LookupEnv("SSH_AUTH_SOCK"); found {
+ sshAgent, err := net.Dial("unix", sshAgentSocket)
+ if err != nil {
+ return "", fmt.Errorf("failed to dial SSH agent: %v", err)
+ }
+ defer sshAgent.Close()
+
+ auth = append(auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))
+ }
+
+ if sshKeyPath, found := os.LookupEnv("E2E_PRIVATE_KEY_PATH"); found {
+ sshKey, err := os.ReadFile(sshKeyPath)
+ if err != nil {
+ return ret, fmt.Errorf("failed to read SSH key: %v", err)
+ }
+
+ signer, err := ssh.ParsePrivateKey(sshKey)
+ if err != nil {
+ return ret, fmt.Errorf("failed to parse SSH key: %v", err)
+ }
+
+ auth = append(auth, ssh.PublicKeys(signer))
+ }
+
+ sshClient, err := ssh.Dial("tcp", *instanceIP+":22", &ssh.ClientConfig{
+ User: "ubuntu",
+ Auth: auth,
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+ })
+ if err != nil {
+ return ret, fmt.Errorf("failed to dial SSH server %s: %v", *instanceIP, err)
+ }
+ defer sshClient.Close()
+
+ sshSession, err := sshClient.NewSession()
+ if err != nil {
+ return ret, fmt.Errorf("failed to create SSH session: %v", err)
+ }
+ defer sshSession.Close()
+
+ stdout, err := sshSession.StdoutPipe()
+ if err != nil {
+ return ret, fmt.Errorf("failed to create stdout pipe: %v", err)
+ }
+
+ stderr, err := sshSession.StderrPipe()
+ if err != nil {
+ return ret, fmt.Errorf("failed to create stderr pipe: %v", err)
+ }
+
+ err = sshSession.Start("kind get kubeconfig --name \"$(kind get clusters | head -n 1)\"")
+ if err != nil {
+ return ret, fmt.Errorf("failed to start remote command: %v", err)
+ }
+
+ var stdoutBuf bytes.Buffer
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+ errChannel := make(chan error, 2)
+
+ go func() {
+ if _, err := io.Copy(&stdoutBuf, stdout); err != nil {
+ errChannel <- fmt.Errorf("failed to read stdout: %v", err)
+ }
+ wg.Done()
+ }()
+
+ go func() {
+ if _, err := io.Copy(&out, stderr); err != nil {
+ errChannel <- fmt.Errorf("failed to read stderr: %v", err)
+ }
+ wg.Done()
+ }()
+
+ err = sshSession.Wait()
+ wg.Wait()
+ close(errChannel)
+ for err := range errChannel {
+ if err != nil {
+ return ret, err
+ }
+ }
+
+ if err != nil {
+ return ret, fmt.Errorf("remote command exited with error: %v", err)
+ }
+
+ kubeconfig, err := clientcmd.Load(stdoutBuf.Bytes())
+ if err != nil {
+ return ret, fmt.Errorf("failed to parse kubeconfig: %v", err)
+ }
+
+ for _, cluster := range kubeconfig.Clusters {
+ cluster.Server = strings.Replace(cluster.Server, "0.0.0.0", *instanceIP, 1)
+ cluster.CertificateAuthorityData = nil
+ cluster.InsecureSkipTLSVerify = true
+ }
+
+ err = dumpK8sClusterState(ctx, kubeconfig, &out)
+ if err != nil {
+ return ret, fmt.Errorf("failed to dump cluster state: %v", err)
+ }
+
+ return ret, nil
+}
+
+func dumpK8sClusterState(ctx context.Context, kubeconfig *clientcmdapi.Config, out *strings.Builder) error {
+ kubeconfigFile, err := os.CreateTemp("", "kubeconfig")
+ if err != nil {
+ return fmt.Errorf("failed to create kubeconfig temporary file: %v", err)
+ }
+ defer os.Remove(kubeconfigFile.Name())
+
+ if err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile.Name()); err != nil {
+ return fmt.Errorf("failed to write kubeconfig file: %v", err)
+ }
+
+ if err := kubeconfigFile.Close(); err != nil {
+ return fmt.Errorf("failed to close kubeconfig file: %v", err)
+ }
+
+ fmt.Fprintf(out, "\n")
+
+ configFlags := genericclioptions.NewConfigFlags(false)
+ kubeconfigFileName := kubeconfigFile.Name()
+ configFlags.KubeConfig = &kubeconfigFileName
+
+ factory := kubectlutil.NewFactory(configFlags)
+
+ streams := genericiooptions.IOStreams{
+ Out: out,
+ ErrOut: out,
+ }
+
+ getCmd := kubectlget.NewCmdGet("", factory, streams)
+ getCmd.SetOut(out)
+ getCmd.SetErr(out)
+ getCmd.SetContext(ctx)
+ getCmd.SetArgs([]string{
+ "nodes,all",
+ "--all-namespaces",
+ "-o",
+ "wide",
+ })
+ if err := getCmd.ExecuteContext(ctx); err != nil {
+ return fmt.Errorf("failed to execute kubectl get: %v", err)
+ }
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/aws/kubernetes/params.go b/test/new-e2e/pkg/environments/aws/kubernetes/params.go
new file mode 100644
index 0000000000000..9f7e9f1c394ff
--- /dev/null
+++ b/test/new-e2e/pkg/environments/aws/kubernetes/params.go
@@ -0,0 +1,173 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package awskubernetes contains the provisioner for the Kubernetes based environments
+package awskubernetes
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
+ "github.com/DataDog/test-infra-definitions/common/config"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes"
+ "github.com/DataDog/test-infra-definitions/resources/aws"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/eks"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake"
+
+ "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
+)
+
+// ProvisionerParams contains all the parameters needed to create the environment
+type ProvisionerParams struct {
+ name string
+ vmOptions []ec2.VMOption
+ agentOptions []kubernetesagentparams.Option
+ fakeintakeOptions []fakeintake.Option
+ eksOptions []eks.Option
+ extraConfigParams runner.ConfigMap
+ workloadAppFuncs []WorkloadAppFunc
+
+ eksLinuxNodeGroup bool
+ eksLinuxARMNodeGroup bool
+ eksBottlerocketNodeGroup bool
+ eksWindowsNodeGroup bool
+ awsEnv *aws.Environment
+ deployDogstatsd bool
+ deployTestWorkload bool
+}
+
+func newProvisionerParams() *ProvisionerParams {
+ return &ProvisionerParams{
+ name: defaultVMName,
+ vmOptions: []ec2.VMOption{},
+ agentOptions: []kubernetesagentparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ eksOptions: []eks.Option{},
+ extraConfigParams: runner.ConfigMap{},
+ workloadAppFuncs: []WorkloadAppFunc{},
+
+ eksLinuxNodeGroup: false,
+ eksLinuxARMNodeGroup: false,
+ eksBottlerocketNodeGroup: false,
+ eksWindowsNodeGroup: false,
+ deployDogstatsd: false,
+ }
+}
+
+// GetProvisionerParams return ProvisionerParams from options opts setup
+func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := newProvisionerParams()
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a function that modifies the ProvisionerParams
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithEC2VMOptions adds options to the EC2 VM
+func WithEC2VMOptions(opts ...ec2.VMOption) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.vmOptions = opts
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the agent
+func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = opts
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the fake intake
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = opts
+ return nil
+ }
+}
+
+// WithEKSOptions adds options to the EKS cluster
+func WithEKSOptions(opts ...eks.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.eksOptions = opts
+ return nil
+ }
+}
+
+// WithDeployDogstatsd deploy standalone dogstatd
+func WithDeployDogstatsd() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.deployDogstatsd = true
+ return nil
+ }
+}
+
+// WithDeployTestWorkload deploy a test workload
+func WithDeployTestWorkload() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.deployTestWorkload = true
+ return nil
+ }
+}
+
+// WithoutFakeIntake removes the fake intake
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithoutAgent removes the agent
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the environment
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WorkloadAppFunc is a function that deploys a workload app to a kube provider
+type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)
+
+// WithWorkloadApp adds a workload app to the environment
+func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
+ return nil
+ }
+}
+
+// WithAwsEnv asks the provisioner to use the given environment, it is created otherwise
+func WithAwsEnv(env *aws.Environment) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.awsEnv = env
+ return nil
+ }
+}
diff --git a/test/new-e2e/pkg/environments/azure/host/linux/host.go b/test/new-e2e/pkg/environments/azure/host/linux/host.go
new file mode 100644
index 0000000000000..5006c4c4750e3
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/host/linux/host.go
@@ -0,0 +1,126 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package azurehost contains the definition of the Azure Host environment.
+package azurehost
+
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/test-infra-definitions/components/os"
+ "github.com/DataDog/test-infra-definitions/resources/azure"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/components/datadog/updater"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+const (
+ provisionerBaseID = "azure-vm-"
+ defaultVMName = "vm"
+)
+
+// Provisioner creates a VM environment with an VM, a FakeIntake and a Host Agent configured to talk to each other.
+// FakeIntake and Agent creation can be deactivated by using [WithoutFakeIntake] and [WithoutAgent] options.
+func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ // We need to build params here to be able to use params.name in the provisioner name
+ params := GetProvisionerParams(opts...)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Host) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard-to-debug issues.
+ params := GetProvisionerParams(opts...)
+ return Run(ctx, env, RunParams{ProvisionerParams: params})
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// Run deploys an environment given a pulumi.Context
+func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error {
+ var azureEnv azure.Environment
+ if runParams.Environment == nil {
+ var err error
+ azureEnv, err = azure.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+ } else {
+ azureEnv = *runParams.Environment
+ }
+ params := runParams.ProvisionerParams
+ params.instanceOptions = append(params.instanceOptions, compute.WithOS(os.UbuntuDefault))
+
+ host, err := compute.NewVM(azureEnv, params.name, params.instanceOptions...)
+ if err != nil {
+ return err
+ }
+ err = host.Export(ctx, &env.RemoteHost.HostOutput)
+ if err != nil {
+ return err
+ }
+
+ // Create FakeIntake if required
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(azureEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+
+ // Normally if FakeIntake is enabled, Agent is enabled, but just in case
+ if params.agentOptions != nil {
+ // Prepend in case it's overridden by the user
+ newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ }
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.FakeIntake = nil
+ }
+ if !params.installUpdater {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Updater = nil
+ }
+
+ // Create Agent if required
+ if params.installUpdater && params.agentOptions != nil {
+ updater, err := updater.NewHostUpdater(&azureEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = updater.Export(ctx, &env.Updater.HostUpdaterOutput)
+ if err != nil {
+ return err
+ }
+ // todo: add agent once updater installs agent on bootstrap
+ env.Agent = nil
+ } else if params.agentOptions != nil {
+ agent, err := agent.NewHostAgent(&azureEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = agent.Export(ctx, &env.Agent.HostAgentOutput)
+ if err != nil {
+ return err
+ }
+
+ env.Agent.ClientOptions = params.agentClientOptions
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Agent = nil
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/azure/host/linux/params.go b/test/new-e2e/pkg/environments/azure/host/linux/params.go
new file mode 100644
index 0000000000000..ff32a326f06eb
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/host/linux/params.go
@@ -0,0 +1,152 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package azurehost
+
+import (
+ "fmt"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/resources/azure"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+)
+
+// ProvisionerParams is a set of parameters for the Provisioner.
+type ProvisionerParams struct {
+ name string
+
+ instanceOptions []compute.VMOption
+ agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
+ fakeintakeOptions []fakeintake.Option
+ extraConfigParams runner.ConfigMap
+ installUpdater bool
+}
+
+func newProvisionerParams() *ProvisionerParams {
+ // We use nil arrays to decide if we should create or not
+ return &ProvisionerParams{
+ name: defaultVMName,
+ instanceOptions: []compute.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ extraConfigParams: runner.ConfigMap{},
+ }
+}
+
+// GetProvisionerParams return ProvisionerParams from options opts setup
+func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := newProvisionerParams()
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a provisioner option.
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner.
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithInstanceOptions adds options to the EC2 VM.
+func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.instanceOptions = append(params.instanceOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the Agent.
+func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = append(params.agentOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the FakeIntake.
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = append(params.fakeintakeOptions, opts...)
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the ConfigMap.
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WithoutFakeIntake disables the creation of the FakeIntake.
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithoutAgent disables the creation of the Agent.
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithUpdater installs the agent through the updater.
+func WithUpdater() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.installUpdater = true
+ return nil
+ }
+}
+
+// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options.
+func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+2)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option.
+func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// RunParams is a set of parameters for the Run function.
+type RunParams struct {
+ Environment *azure.Environment
+ ProvisionerParams *ProvisionerParams
+}
diff --git a/test/new-e2e/pkg/environments/azure/host/windows/host.go b/test/new-e2e/pkg/environments/azure/host/windows/host.go
new file mode 100644
index 0000000000000..d9e59b823e1d7
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/host/windows/host.go
@@ -0,0 +1,153 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package winazurehost contains the definition of the Azure Windows Host environment.
+package winazurehost
+
+import (
+ "github.com/DataDog/test-infra-definitions/components/activedirectory"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/resources/azure"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/components/defender"
+)
+
+const (
+ provisionerBaseID = "azure-vm-"
+ defaultVMName = "vm"
+)
+
+// Provisioner creates a VM environment with a Windows VM, a FakeIntake and a Host Agent configured to talk to each other.
+// FakeIntake and Agent creation can be deactivated by using [WithoutFakeIntake] and [WithoutAgent] options.
+func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.WindowsHost] {
+ // We need to build params here to be able to use params.name in the provisioner name
+ params := getProvisionerParams(opts...)
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.WindowsHost) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard-to-debug issues.
+ params := getProvisionerParams(opts...)
+ return Run(ctx, env, params)
+ }, nil)
+
+ return provisioner
+}
+
+// ProvisionerNoAgent wraps Provisioner with hardcoded WithoutAgent options.
+func ProvisionerNoAgent(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.WindowsHost] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutAgent())
+
+ return Provisioner(mergedOpts...)
+}
+
+// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options.
+func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.WindowsHost] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+2)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option.
+func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.WindowsHost] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// Run deploys a Windows environment given a pulumi.Context
+func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *ProvisionerParams) error {
+ azureEnv, err := azure.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+
+ host, err := compute.NewVM(azureEnv, params.name, params.instanceOptions...)
+ if err != nil {
+ return err
+ }
+ err = host.Export(ctx, &env.RemoteHost.HostOutput)
+ if err != nil {
+ return err
+ }
+
+ if params.defenderOptions != nil {
+ defender, err := defender.NewDefender(azureEnv.CommonEnvironment, host, params.defenderOptions...)
+ if err != nil {
+ return err
+ }
+ // Active Directory setup needs to happen after Windows Defender setup
+ params.activeDirectoryOptions = append(params.activeDirectoryOptions,
+ activedirectory.WithPulumiResourceOptions(
+ pulumi.DependsOn(defender.Resources)))
+ }
+
+ if params.activeDirectoryOptions != nil {
+ activeDirectoryComp, activeDirectoryResources, err := activedirectory.NewActiveDirectory(ctx, &azureEnv, host, params.activeDirectoryOptions...)
+ if err != nil {
+ return err
+ }
+ err = activeDirectoryComp.Export(ctx, &env.ActiveDirectory.Output)
+ if err != nil {
+ return err
+ }
+
+ if params.agentOptions != nil {
+ // Agent install needs to happen after ActiveDirectory setup
+ params.agentOptions = append(params.agentOptions,
+ agentparams.WithPulumiResourceOptions(
+ pulumi.DependsOn(activeDirectoryResources)))
+ }
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.ActiveDirectory = nil
+ }
+
+ // Create FakeIntake if required
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(azureEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+ // Normally if FakeIntake is enabled, Agent is enabled, but just in case
+ if params.agentOptions != nil {
+ // Prepend in case it's overridden by the user
+ newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ }
+ } else {
+ env.FakeIntake = nil
+ }
+
+ if params.agentOptions != nil {
+ agent, err := agent.NewHostAgent(&azureEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+ err = agent.Export(ctx, &env.Agent.HostAgentOutput)
+ if err != nil {
+ return err
+ }
+ env.Agent.ClientOptions = params.agentClientOptions
+ } else {
+ env.Agent = nil
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/azure/host/windows/params.go b/test/new-e2e/pkg/environments/azure/host/windows/params.go
new file mode 100644
index 0000000000000..4d4fbd6c0eb36
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/host/windows/params.go
@@ -0,0 +1,122 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package winazurehost
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/components/defender"
+ "github.com/DataDog/test-infra-definitions/components/activedirectory"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+)
+
+// ProvisionerParams is a set of parameters for the Provisioner.
+type ProvisionerParams struct {
+ name string
+
+ instanceOptions []compute.VMOption
+ agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
+ fakeintakeOptions []fakeintake.Option
+ activeDirectoryOptions []activedirectory.Option
+ defenderOptions []defender.Option
+}
+
+// ProvisionerOption is a provisioner option.
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner.
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithInstanceOptions adds options to the VM.
+func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.instanceOptions = append(params.instanceOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the Agent.
+func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = append(params.agentOptions, opts...)
+ return nil
+ }
+}
+
+// WithoutAgent disables the creation of the Agent.
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the FakeIntake.
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = append(params.fakeintakeOptions, opts...)
+ return nil
+ }
+}
+
+// WithoutFakeIntake disables the creation of the FakeIntake.
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithActiveDirectoryOptions adds Active Directory to the EC2 VM.
+func WithActiveDirectoryOptions(opts ...activedirectory.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.activeDirectoryOptions = append(params.activeDirectoryOptions, opts...)
+ return nil
+ }
+}
+
+// WithDefenderOptions configures Windows Defender on an EC2 VM.
+func WithDefenderOptions(opts ...defender.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.defenderOptions = append(params.defenderOptions, opts...)
+ return nil
+ }
+}
+
+func getProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := &ProvisionerParams{
+ name: defaultVMName,
+ instanceOptions: []compute.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ // Disable Windows Defender on VMs by default
+ defenderOptions: []defender.Option{defender.WithDefenderDisabled()},
+ }
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+ }
+ return params
+}
diff --git a/test/new-e2e/pkg/environments/azure/kubernetes/aks.go b/test/new-e2e/pkg/environments/azure/kubernetes/aks.go
new file mode 100644
index 0000000000000..829a76675469c
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/kubernetes/aks.go
@@ -0,0 +1,112 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package azurekubernetes contains the provisioner for Azure Kubernetes Service (AKS)
+package azurekubernetes
+
+import (
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ "github.com/DataDog/test-infra-definitions/resources/azure"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/aks"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+)
+
+const (
+ provisionerBaseID = "azure-aks"
+)
+
+// AKSProvisioner creates a new provisioner for AKS on Azure
+func AKSProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ return AKSRunFunc(ctx, env, params)
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// AKSRunFunc is the run function for AKS provisioner
+func AKSRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error {
+ azureEnv, err := azure.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Create the AKS cluster
+ aksCluster, err := aks.NewAKSCluster(azureEnv, params.aksOptions...)
+ if err != nil {
+ return err
+ }
+ err = aksCluster.Export(ctx, &env.KubernetesCluster.ClusterOutput)
+ if err != nil {
+ return err
+ }
+
+ agentOptions := params.agentOptions
+
+ // Deploy a fakeintake
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(azureEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+ agentOptions = append(agentOptions, kubernetesagentparams.WithFakeintake(fakeIntake))
+
+ } else {
+ env.FakeIntake = nil
+ }
+
+ if params.agentOptions != nil {
+ // On Kata nodes, AKS uses the node-name (like aks-kata-21213134-vmss000000) as the only SAN in the Kubelet
+ // certificate. However, the DNS name aks-kata-21213134-vmss000000 is not resolvable, so it cannot be used
+ // to reach the Kubelet. Thus we need to use `tlsVerify: false` and `and `status.hostIP` as `host` in
+ // the Helm values
+ customValues := `
+datadog:
+ kubelet:
+ host:
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ hostCAPath: /etc/kubernetes/certs/kubeletserver.crt
+ tlsVerify: false
+providers:
+ aks:
+ enabled: true
+`
+ agentOptions = append(agentOptions, kubernetesagentparams.WithHelmValues(customValues))
+ agent, err := helm.NewKubernetesAgent(&azureEnv, params.name, aksCluster.KubeProvider, agentOptions...)
+ if err != nil {
+ return err
+ }
+ err = agent.Export(ctx, &env.Agent.KubernetesAgentOutput)
+ if err != nil {
+ return err
+ }
+ } else {
+ env.Agent = nil
+ }
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/azure/kubernetes/params.go b/test/new-e2e/pkg/environments/azure/kubernetes/params.go
new file mode 100644
index 0000000000000..79b9526a3917d
--- /dev/null
+++ b/test/new-e2e/pkg/environments/azure/kubernetes/params.go
@@ -0,0 +1,100 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package azurekubernetes contains the provisioner for the Kubernetes based environments
+package azurekubernetes
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
+ "github.com/DataDog/test-infra-definitions/common/config"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/aks"
+ "github.com/DataDog/test-infra-definitions/scenarios/azure/fakeintake"
+
+ "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
+)
+
+// ProvisionerParams contains all the parameters needed to create the environment
+type ProvisionerParams struct {
+ name string
+ fakeintakeOptions []fakeintake.Option
+ agentOptions []kubernetesagentparams.Option
+ aksOptions []aks.Option
+ workloadAppFuncs []WorkloadAppFunc
+ extraConfigParams runner.ConfigMap
+}
+
+func newProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := &ProvisionerParams{
+ name: "aks",
+ fakeintakeOptions: []fakeintake.Option{},
+ agentOptions: []kubernetesagentparams.Option{},
+ workloadAppFuncs: []WorkloadAppFunc{},
+ }
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Sprintf("failed to apply options: %v", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a function that modifies the ProvisionerParams
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the agent
+func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = opts
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the fake intake
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = opts
+ return nil
+ }
+}
+
+// WithAKSOptions adds options to the AKS cluster
+func WithAKSOptions(opts ...aks.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.aksOptions = opts
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the environment
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WorkloadAppFunc is a function that deploys a workload app to a kube provider
+type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)
+
+// WithWorkloadApp adds a workload app to the environment
+func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
+ return nil
+ }
+}
diff --git a/test/new-e2e/pkg/environments/dockerhost.go b/test/new-e2e/pkg/environments/dockerhost.go
index a20ea2c7883f8..0b871efd721eb 100644
--- a/test/new-e2e/pkg/environments/dockerhost.go
+++ b/test/new-e2e/pkg/environments/dockerhost.go
@@ -8,45 +8,20 @@ package environments
import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
- "github.com/DataDog/test-infra-definitions/resources/aws"
)
// DockerHost is an environment that contains a Docker VM, FakeIntake and Agent configured to talk to each other.
type DockerHost struct {
- AwsEnvironment *aws.Environment
// Components
RemoteHost *components.RemoteHost
FakeIntake *components.FakeIntake
Agent *components.DockerAgent
-
- // Other clients
- Docker *client.Docker
+ Docker *components.RemoteHostDocker
}
var _ e2e.Initializable = &DockerHost{}
// Init initializes the environment
-func (e *DockerHost) Init(ctx e2e.Context) error {
- privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "")
- if err != nil {
- return err
- }
-
- e.Docker, err = client.NewDocker(ctx.T(), e.RemoteHost.HostOutput, privateKeyPath)
- if err != nil {
- return err
- }
-
- if e.Agent != nil {
- agent, err := client.NewDockerAgentClient(ctx.T(), e.Docker, e.Agent.ContainerName, true)
- if err != nil {
- return err
- }
- e.Agent.Client = agent
- }
-
+func (e *DockerHost) Init(_ e2e.Context) error {
return nil
}
diff --git a/test/new-e2e/pkg/environments/ecs.go b/test/new-e2e/pkg/environments/ecs.go
index 59c024ab6411e..3610318ac66be 100644
--- a/test/new-e2e/pkg/environments/ecs.go
+++ b/test/new-e2e/pkg/environments/ecs.go
@@ -6,39 +6,12 @@
package environments
import (
- "github.com/DataDog/test-infra-definitions/resources/aws"
- "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
- "github.com/zorkian/go-datadog-api"
-
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
)
// ECS is an environment that contains a ECS deployed in a cluster, FakeIntake and Agent configured to talk to each other.
type ECS struct {
- AwsEnvironment *aws.Environment
- ClusterName pulumi.StringInput
- ClusterArn pulumi.StringInput
-
// Components
- FakeIntake *components.FakeIntake
- DatadogClient *datadog.Client
-}
-
-var _ e2e.Initializable = &ECS{}
-
-// Init initializes the environment
-func (e *ECS) Init(_ e2e.Context) error {
- apiKey, err := runner.GetProfile().SecretStore().Get(parameters.APIKey)
- if err != nil {
- return err
- }
- appKey, err := runner.GetProfile().SecretStore().Get(parameters.APPKey)
- if err != nil {
- return err
- }
- e.DatadogClient = datadog.NewClient(apiKey, appKey)
- return nil
+ ECSCluster *components.ECSCluster
+ FakeIntake *components.FakeIntake
}
diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/host.go b/test/new-e2e/pkg/environments/gcp/host/linux/host.go
new file mode 100644
index 0000000000000..0e479d8a51bdf
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/host/linux/host.go
@@ -0,0 +1,124 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package gcphost contains the definition of the GCP Host environment.
+package gcphost
+
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/test-infra-definitions/resources/gcp"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/components/datadog/updater"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+const (
+ provisionerBaseID = "gcp-vm-"
+ defaultVMName = "vm"
+)
+
+// Provisioner creates a VM environment with an VM, a FakeIntake and a Host Agent configured to talk to each other.
+// FakeIntake and Agent creation can be deactivated by using [WithoutFakeIntake] and [WithoutAgent] options.
+func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ // We need to build params here to be able to use params.name in the provisioner name
+ params := GetProvisionerParams(opts...)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Host) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard-to-debug issues.
+ params := GetProvisionerParams(opts...)
+ return Run(ctx, env, RunParams{ProvisionerParams: params})
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// Run deploys an environment given a pulumi.Context
+func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error {
+ var gcpEnv gcp.Environment
+ if runParams.Environment == nil {
+ var err error
+ gcpEnv, err = gcp.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+ } else {
+ gcpEnv = *runParams.Environment
+ }
+ params := runParams.ProvisionerParams
+
+ host, err := compute.NewVM(gcpEnv, params.name, params.instanceOptions...)
+ if err != nil {
+ return err
+ }
+ err = host.Export(ctx, &env.RemoteHost.HostOutput)
+ if err != nil {
+ return err
+ }
+
+ // Create FakeIntake if required
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+
+ // Normally if FakeIntake is enabled, Agent is enabled, but just in case
+ if params.agentOptions != nil {
+ // Prepend in case it's overridden by the user
+ newOpts := []agentparams.Option{agentparams.WithFakeintake(fakeIntake)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ }
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.FakeIntake = nil
+ }
+ if !params.installUpdater {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Updater = nil
+ }
+
+ // Create Agent if required
+ if params.installUpdater && params.agentOptions != nil {
+ updater, err := updater.NewHostUpdater(&gcpEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = updater.Export(ctx, &env.Updater.HostUpdaterOutput)
+ if err != nil {
+ return err
+ }
+ // todo: add agent once updater installs agent on bootstrap
+ env.Agent = nil
+ } else if params.agentOptions != nil {
+ agent, err := agent.NewHostAgent(&gcpEnv, host, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+
+ err = agent.Export(ctx, &env.Agent.HostAgentOutput)
+ if err != nil {
+ return err
+ }
+
+ env.Agent.ClientOptions = params.agentClientOptions
+ } else {
+ // Suite inits all fields by default, so we need to explicitly set it to nil
+ env.Agent = nil
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/gcp/host/linux/params.go b/test/new-e2e/pkg/environments/gcp/host/linux/params.go
new file mode 100644
index 0000000000000..442fd28b889b0
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/host/linux/params.go
@@ -0,0 +1,152 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package gcphost
+
+import (
+ "fmt"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agentparams"
+ "github.com/DataDog/test-infra-definitions/resources/gcp"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/compute"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+)
+
+// ProvisionerParams is a set of parameters for the Provisioner.
+type ProvisionerParams struct {
+ name string
+
+ instanceOptions []compute.VMOption
+ agentOptions []agentparams.Option
+ agentClientOptions []agentclientparams.Option
+ fakeintakeOptions []fakeintake.Option
+ extraConfigParams runner.ConfigMap
+ installUpdater bool
+}
+
+func newProvisionerParams() *ProvisionerParams {
+ // We use nil arrays to decide if we should create or not
+ return &ProvisionerParams{
+ name: defaultVMName,
+ instanceOptions: []compute.VMOption{},
+ agentOptions: []agentparams.Option{},
+ agentClientOptions: []agentclientparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ extraConfigParams: runner.ConfigMap{},
+ }
+}
+
+// GetProvisionerParams return ProvisionerParams from options opts setup
+func GetProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := newProvisionerParams()
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Errorf("unable to apply ProvisionerOption, err: %w", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a provisioner option.
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner.
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithInstanceOptions adds options to the EC2 VM.
+func WithInstanceOptions(opts ...compute.VMOption) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.instanceOptions = append(params.instanceOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the Agent.
+func WithAgentOptions(opts ...agentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = append(params.agentOptions, opts...)
+ return nil
+ }
+}
+
+// WithAgentClientOptions adds options to the Agent client.
+func WithAgentClientOptions(opts ...agentclientparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentClientOptions = append(params.agentClientOptions, opts...)
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the FakeIntake.
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = append(params.fakeintakeOptions, opts...)
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the ConfigMap.
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WithoutFakeIntake disables the creation of the FakeIntake.
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithoutAgent disables the creation of the Agent.
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithUpdater installs the agent through the updater.
+func WithUpdater() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.installUpdater = true
+ return nil
+ }
+}
+
+// ProvisionerNoAgentNoFakeIntake wraps Provisioner with hardcoded WithoutAgent and WithoutFakeIntake options.
+func ProvisionerNoAgentNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+2)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutAgent(), WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// ProvisionerNoFakeIntake wraps Provisioner with hardcoded WithoutFakeIntake option.
+func ProvisionerNoFakeIntake(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Host] {
+ mergedOpts := make([]ProvisionerOption, 0, len(opts)+1)
+ mergedOpts = append(mergedOpts, opts...)
+ mergedOpts = append(mergedOpts, WithoutFakeIntake())
+
+ return Provisioner(mergedOpts...)
+}
+
+// RunParams is a set of parameters for the Run function.
+type RunParams struct {
+ Environment *gcp.Environment
+ ProvisionerParams *ProvisionerParams
+}
diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go
new file mode 100644
index 0000000000000..10c315fb7b254
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go
@@ -0,0 +1,94 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE)
+package gcpkubernetes
+
+import (
+ "github.com/DataDog/test-infra-definitions/resources/gcp"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+)
+
+const (
+ provisionerBaseID = "gcp-gke"
+)
+
+// GKEProvisioner creates a new provisioner for GKE on GCP
+func GKEProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ return GKERunFunc(ctx, env, params)
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// GKERunFunc is the run function for GKE provisioner
+func GKERunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error {
+ gcpEnv, err := gcp.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Create the cluster
+ cluster, err := gke.NewGKECluster(gcpEnv, params.gkeOptions...)
+ if err != nil {
+ return err
+ }
+ err = cluster.Export(ctx, &env.KubernetesCluster.ClusterOutput)
+ if err != nil {
+ return err
+ }
+
+ agentOptions := params.agentOptions
+
+ // Deploy a fakeintake
+ if params.fakeintakeOptions != nil {
+ fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...)
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+ agentOptions = append(agentOptions, kubernetesagentparams.WithFakeintake(fakeIntake))
+
+ } else {
+ env.FakeIntake = nil
+ }
+
+ if params.agentOptions != nil {
+ agent, err := helm.NewKubernetesAgent(&gcpEnv, params.name, cluster.KubeProvider, agentOptions...)
+ if err != nil {
+ return err
+ }
+ err = agent.Export(ctx, &env.Agent.KubernetesAgentOutput)
+ if err != nil {
+ return err
+ }
+ } else {
+ env.Agent = nil
+ }
+ return nil
+}
diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/params.go b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go
new file mode 100644
index 0000000000000..d42a5dac75f9e
--- /dev/null
+++ b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go
@@ -0,0 +1,100 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE)
+package gcpkubernetes
+
+import (
+ "fmt"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
+ "github.com/DataDog/test-infra-definitions/common/config"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes"
+ "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake"
+
+ "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
+)
+
+// ProvisionerParams contains all the parameters needed to create the environment
+type ProvisionerParams struct {
+ name string
+ fakeintakeOptions []fakeintake.Option
+ agentOptions []kubernetesagentparams.Option
+ gkeOptions []gke.Option
+ workloadAppFuncs []WorkloadAppFunc
+ extraConfigParams runner.ConfigMap
+}
+
+func newProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams {
+ params := &ProvisionerParams{
+ name: "gke",
+ fakeintakeOptions: []fakeintake.Option{},
+ agentOptions: []kubernetesagentparams.Option{},
+ workloadAppFuncs: []WorkloadAppFunc{},
+ }
+ err := optional.ApplyOptions(params, opts)
+ if err != nil {
+ panic(fmt.Sprintf("failed to apply options: %v", err))
+ }
+ return params
+}
+
+// ProvisionerOption is a function that modifies the ProvisionerParams
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the agent
+func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = opts
+ return nil
+ }
+}
+
+// WithFakeIntakeOptions adds options to the fake intake
+func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = opts
+ return nil
+ }
+}
+
+// WithGKEOptions adds options to the cluster
+func WithGKEOptions(opts ...gke.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.gkeOptions = opts
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the environment
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WorkloadAppFunc is a function that deploys a workload app to a kube provider
+type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)
+
+// WithWorkloadApp adds a workload app to the environment
+func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
+ return nil
+ }
+}
diff --git a/test/new-e2e/pkg/environments/host.go b/test/new-e2e/pkg/environments/host.go
index b1d1bd9903776..20c49c3fc7762 100644
--- a/test/new-e2e/pkg/environments/host.go
+++ b/test/new-e2e/pkg/environments/host.go
@@ -8,31 +8,19 @@ package environments
import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
- "github.com/DataDog/test-infra-definitions/resources/aws"
)
// Host is an environment that contains a Host, FakeIntake and Agent configured to talk to each other.
type Host struct {
- AwsEnvironment *aws.Environment
- // Components
RemoteHost *components.RemoteHost
FakeIntake *components.FakeIntake
Agent *components.RemoteHostAgent
Updater *components.RemoteHostUpdater
}
-var _ e2e.Initializable = &Host{}
+var _ e2e.Initializable = (*Host)(nil)
// Init initializes the environment
-func (e *Host) Init(ctx e2e.Context) error {
- if e.Agent != nil {
- agent, err := client.NewHostAgentClient(ctx.T(), e.RemoteHost, true)
- if err != nil {
- return err
- }
- e.Agent.Client = agent
- }
-
+func (e *Host) Init(_ e2e.Context) error {
return nil
}
diff --git a/test/new-e2e/pkg/environments/host_win.go b/test/new-e2e/pkg/environments/host_win.go
index f29a88ab2d63a..474d441f8fee5 100644
--- a/test/new-e2e/pkg/environments/host_win.go
+++ b/test/new-e2e/pkg/environments/host_win.go
@@ -8,13 +8,12 @@ package environments
import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
- "github.com/DataDog/test-infra-definitions/resources/aws"
+ "github.com/DataDog/test-infra-definitions/common/config"
)
// WindowsHost is an environment based on environments.Host but that is specific to Windows.
type WindowsHost struct {
- AwsEnvironment *aws.Environment
+ Environment config.Env
// Components
RemoteHost *components.RemoteHost
FakeIntake *components.FakeIntake
@@ -25,14 +24,6 @@ type WindowsHost struct {
var _ e2e.Initializable = &WindowsHost{}
// Init initializes the environment
-func (e *WindowsHost) Init(ctx e2e.Context) error {
- if e.Agent != nil {
- agent, err := client.NewHostAgentClient(ctx.T(), e.RemoteHost, true)
- if err != nil {
- return err
- }
- e.Agent.Client = agent
- }
-
+func (e *WindowsHost) Init(_ e2e.Context) error {
return nil
}
diff --git a/test/new-e2e/pkg/environments/local/kubernetes/kind.go b/test/new-e2e/pkg/environments/local/kubernetes/kind.go
new file mode 100644
index 0000000000000..b8f76751dcf48
--- /dev/null
+++ b/test/new-e2e/pkg/environments/local/kubernetes/kind.go
@@ -0,0 +1,205 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package localkubernetes contains the provisioner for the local Kubernetes based environments
+package localkubernetes
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+
+ "github.com/DataDog/test-infra-definitions/common/config"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm"
+ "github.com/DataDog/test-infra-definitions/resources/local"
+
+ fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake"
+ "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams"
+ kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes"
+ "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake"
+
+ "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+const (
+ provisionerBaseID = "aws-kind-"
+ defaultVMName = "kind"
+)
+
+// ProvisionerParams contains all the parameters needed to create the environment
+type ProvisionerParams struct {
+ name string
+ agentOptions []kubernetesagentparams.Option
+ fakeintakeOptions []fakeintake.Option
+ extraConfigParams runner.ConfigMap
+ workloadAppFuncs []WorkloadAppFunc
+}
+
+func newProvisionerParams() *ProvisionerParams {
+ return &ProvisionerParams{
+ name: defaultVMName,
+ agentOptions: []kubernetesagentparams.Option{},
+ fakeintakeOptions: []fakeintake.Option{},
+ extraConfigParams: runner.ConfigMap{},
+ }
+}
+
+// WorkloadAppFunc is a function that deploys a workload app to a kube provider
+type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)
+
+// ProvisionerOption is a function that modifies the ProvisionerParams
+type ProvisionerOption func(*ProvisionerParams) error
+
+// WithName sets the name of the provisioner
+func WithName(name string) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.name = name
+ return nil
+ }
+}
+
+// WithAgentOptions adds options to the agent
+func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = opts
+ return nil
+ }
+}
+
+// WithoutFakeIntake removes the fake intake
+func WithoutFakeIntake() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.fakeintakeOptions = nil
+ return nil
+ }
+}
+
+// WithoutAgent removes the agent
+func WithoutAgent() ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.agentOptions = nil
+ return nil
+ }
+}
+
+// WithExtraConfigParams adds extra config parameters to the environment
+func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.extraConfigParams = configMap
+ return nil
+ }
+}
+
+// WithWorkloadApp adds a workload app to the environment
+func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption {
+ return func(params *ProvisionerParams) error {
+ params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc)
+ return nil
+ }
+}
+
+// Provisioner creates a new provisioner
+func Provisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error {
+ // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times.
+ // and it's easy to forget about it, leading to hard to debug issues.
+ params := newProvisionerParams()
+ _ = optional.ApplyOptions(params, opts)
+
+ return KindRunFunc(ctx, env, params)
+ }, params.extraConfigParams)
+
+ return provisioner
+}
+
+// KindRunFunc is the Pulumi run function that runs the provisioner
+func KindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error {
+
+ localEnv, err := local.NewEnvironment(ctx)
+ if err != nil {
+ return err
+ }
+
+ kindCluster, err := kubeComp.NewLocalKindCluster(&localEnv, localEnv.CommonNamer().ResourceName("kind"), params.name, localEnv.KubernetesVersion())
+ if err != nil {
+ return err
+ }
+
+ err = kindCluster.Export(ctx, &env.KubernetesCluster.ClusterOutput)
+ if err != nil {
+ return err
+ }
+
+ kubeProvider, err := kubernetes.NewProvider(ctx, localEnv.CommonNamer().ResourceName("k8s-provider"), &kubernetes.ProviderArgs{
+ EnableServerSideApply: pulumi.Bool(true),
+ Kubeconfig: kindCluster.KubeConfig,
+ })
+ if err != nil {
+ return err
+ }
+
+ if params.fakeintakeOptions != nil {
+ fakeintakeOpts := []fakeintake.Option{fakeintake.WithLoadBalancer()}
+ params.fakeintakeOptions = append(fakeintakeOpts, params.fakeintakeOptions...)
+ fakeIntake, err := fakeintakeComp.NewLocalDockerFakeintake(&localEnv, "fakeintake")
+ if err != nil {
+ return err
+ }
+ err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput)
+ if err != nil {
+ return err
+ }
+
+ if params.agentOptions != nil {
+ newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithFakeintake(fakeIntake)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ }
+ } else {
+ env.FakeIntake = nil
+ }
+
+ if params.agentOptions != nil {
+ kindClusterName := ctx.Stack()
+ helmValues := fmt.Sprintf(`
+datadog:
+ kubelet:
+ tlsVerify: false
+ clusterName: "%s"
+agents:
+ useHostNetwork: true
+`, kindClusterName)
+
+ newOpts := []kubernetesagentparams.Option{kubernetesagentparams.WithHelmValues(helmValues)}
+ params.agentOptions = append(newOpts, params.agentOptions...)
+ agent, err := helm.NewKubernetesAgent(&localEnv, kindClusterName, kubeProvider, params.agentOptions...)
+ if err != nil {
+ return err
+ }
+ err = agent.Export(ctx, &env.Agent.KubernetesAgentOutput)
+ if err != nil {
+ return err
+ }
+ } else {
+ env.Agent = nil
+ }
+
+ for _, appFunc := range params.workloadAppFuncs {
+ _, err := appFunc(&localEnv, kubeProvider)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/runner/ci_profile.go b/test/new-e2e/pkg/runner/ci_profile.go
index b2b19b37b24b8..44637d79356d8 100644
--- a/test/new-e2e/pkg/runner/ci_profile.go
+++ b/test/new-e2e/pkg/runner/ci_profile.go
@@ -15,9 +15,14 @@ import (
const (
defaultCISecretPrefix = "ci.datadog-agent."
- defaultCIEnvironments = "aws/agent-qa"
)
+var defaultCIEnvironments = map[string]string{
+ "aws": "agent-qa",
+ "az": "agent-qa",
+ "gcp": "agent-qa",
+}
+
type ciProfile struct {
baseProfile
@@ -38,22 +43,38 @@ func NewCIProfile() (Profile, error) {
if err != nil {
return nil, fmt.Errorf("unable to get pulumi state password, err: %w", err)
}
+ // TODO move to job script
os.Setenv("PULUMI_CONFIG_PASSPHRASE", passVal)
// Building name prefix
- pipelineID := os.Getenv("CI_PIPELINE_ID")
+ jobID := os.Getenv("CI_JOB_ID")
projectID := os.Getenv("CI_PROJECT_ID")
- if pipelineID == "" || projectID == "" {
- return nil, fmt.Errorf("unable to compute name prefix, missing variables pipeline id: %s, project id: %s", pipelineID, projectID)
+ if jobID == "" || projectID == "" {
+ return nil, fmt.Errorf("unable to compute name prefix, missing variables job id: %s, project id: %s", jobID, projectID)
}
-
+ uniqueID := jobID
store := parameters.NewEnvStore(EnvPrefix)
+ initOnly, err := store.GetBoolWithDefault(parameters.InitOnly, false)
+ if err != nil {
+ return nil, err
+ }
+
+ preInitialized, err := store.GetBoolWithDefault(parameters.PreInitialized, false)
+ if err != nil {
+ return nil, err
+ }
+
+ if initOnly || preInitialized {
+ uniqueID = fmt.Sprintf("init-%s", os.Getenv("CI_PIPELINE_ID")) // We use pipeline ID for init only and pre-initialized jobs, to be able to share state
+ }
+
// get environments from store
- environmentsStr, err := store.GetWithDefault(parameters.Environments, defaultCIEnvironments)
+ environmentsStr, err := store.GetWithDefault(parameters.Environments, "")
if err != nil {
return nil, err
}
+ environmentsStr = mergeEnvironments(environmentsStr, defaultCIEnvironments)
// TODO can be removed using E2E_ENV variable
ciEnvNames := os.Getenv("CI_ENV_NAMES")
@@ -68,7 +89,7 @@ func NewCIProfile() (Profile, error) {
return ciProfile{
baseProfile: newProfile("e2eci", ciEnvironments, store, &secretStore, outputRoot),
- ciUniqueID: "ci-" + pipelineID + "-" + projectID,
+ ciUniqueID: "ci-" + uniqueID + "-" + projectID,
}, nil
}
diff --git a/test/new-e2e/pkg/runner/configmap.go b/test/new-e2e/pkg/runner/configmap.go
index 0334186095f7e..20dcb1a875cdd 100644
--- a/test/new-e2e/pkg/runner/configmap.go
+++ b/test/new-e2e/pkg/runner/configmap.go
@@ -9,37 +9,59 @@ import (
"encoding/json"
"errors"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
commonconfig "github.com/DataDog/test-infra-definitions/common/config"
infraaws "github.com/DataDog/test-infra-definitions/resources/aws"
+ infraazure "github.com/DataDog/test-infra-definitions/resources/azure"
+ infragcp "github.com/DataDog/test-infra-definitions/resources/gcp"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
"github.com/pulumi/pulumi/sdk/v3/go/auto"
)
const (
- // AgentAPIKey pulumi config paramater name
+ // AgentAPIKey pulumi config parameter name
AgentAPIKey = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentAPIKeyParamName
- // AgentAPPKey pulumi config paramater name
+ // AgentAPPKey pulumi config parameter name
AgentAPPKey = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentAPPKeyParamName
// AgentPipelineID pulumi config parameter name
AgentPipelineID = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentPipelineID
+ // AgentMajorVersion pulumi config parameter name
+ AgentMajorVersion = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentMajorVersion
// AgentCommitSHA pulumi config parameter name
AgentCommitSHA = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentCommitSHA
- // InfraEnvironmentVariables pulumi config paramater name
+ // InfraEnvironmentVariables pulumi config parameter name
InfraEnvironmentVariables = commonconfig.DDInfraConfigNamespace + ":" + commonconfig.DDInfraEnvironment
- // InfraExtraResourcesTags pulumi config paramater name
+ // InfraExtraResourcesTags pulumi config parameter name
InfraExtraResourcesTags = commonconfig.DDInfraConfigNamespace + ":" + commonconfig.DDInfraExtraResourcesTags
- // AWSKeyPairName pulumi config paramater name
+ //InfraInitOnly pulumi config parameter name
+ InfraInitOnly = commonconfig.DDInfraConfigNamespace + ":" + commonconfig.DDInfraInitOnly
+
+ // AWSKeyPairName pulumi config parameter name
AWSKeyPairName = commonconfig.DDInfraConfigNamespace + ":" + infraaws.DDInfraDefaultKeyPairParamName
- // AWSPublicKeyPath pulumi config paramater name
+ // AWSPublicKeyPath pulumi config parameter name
AWSPublicKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infraaws.DDinfraDefaultPublicKeyPath
- // AWSPrivateKeyPath pulumi config paramater name
+ // AWSPrivateKeyPath pulumi config parameter name
AWSPrivateKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infraaws.DDInfraDefaultPrivateKeyPath
- // AWSPrivateKeyPassword pulumi config paramater name
+ // AWSPrivateKeyPassword pulumi config parameter name
AWSPrivateKeyPassword = commonconfig.DDInfraConfigNamespace + ":" + infraaws.DDInfraDefaultPrivateKeyPassword
+
+ // AzurePublicKeyPath pulumi config paramater name
+ AzurePublicKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infraazure.DDInfraDefaultPublicKeyPath
+ // AzurePrivateKeyPath pulumi config paramater name
+ AzurePrivateKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infraazure.DDInfraDefaultPrivateKeyPath
+ // AzurePrivateKeyPassword pulumi config paramater name
+ AzurePrivateKeyPassword = commonconfig.DDInfraConfigNamespace + ":" + infraazure.DDInfraDefaultPrivateKeyPassword
+
+ // GCPPublicKeyPath pulumi config paramater name
+ GCPPublicKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPublicKeyPath
+ // GCPPrivateKeyPath pulumi config paramater name
+ GCPPrivateKeyPath = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPrivateKeyPath
+ // GCPPrivateKeyPassword pulumi config paramater name
+ GCPPrivateKeyPassword = commonconfig.DDInfraConfigNamespace + ":" + infragcp.DDInfraDefaultPrivateKeyPassword
)
// ConfigMap type alias to auto.ConfigMap
@@ -94,48 +116,47 @@ func setConfigMapFromParameter(store parameters.Store, cm ConfigMap, paramName p
// BuildStackParameters creates a config map from a profile, a scenario config map
// and env/cli configuration parameters
func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, error) {
+ var err error
// Priority order: profile configs < scenarioConfig < Env/CLI config
cm := ConfigMap{}
// Parameters from profile
- cm.Set("ddinfra:env", profile.EnvironmentNames(), false)
- err := SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.KeyPairName, AWSKeyPairName)
- if err != nil {
- return nil, err
+ cm.Set(InfraEnvironmentVariables, profile.EnvironmentNames(), false)
+ params := map[parameters.StoreKey][]string{
+ parameters.KeyPairName: {AWSKeyPairName},
+ parameters.PublicKeyPath: {AWSPublicKeyPath, AzurePublicKeyPath, GCPPublicKeyPath},
+ parameters.PrivateKeyPath: {AWSPrivateKeyPath, AzurePrivateKeyPath, GCPPrivateKeyPath},
+ parameters.ExtraResourcesTags: {InfraExtraResourcesTags},
+ parameters.PipelineID: {AgentPipelineID},
+ parameters.MajorVersion: {AgentMajorVersion},
+ parameters.CommitSHA: {AgentCommitSHA},
+ parameters.InitOnly: {InfraInitOnly},
}
- err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.PublicKeyPath, AWSPublicKeyPath)
- if err != nil {
- return nil, err
- }
- err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.PrivateKeyPath, AWSPrivateKeyPath)
- if err != nil {
- return nil, err
- }
- err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.ExtraResourcesTags, InfraExtraResourcesTags)
- if err != nil {
- return nil, err
- }
- err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.PipelineID, AgentPipelineID)
- if err != nil {
- return nil, err
- }
- err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.CommitSHA, AgentCommitSHA)
- if err != nil {
- return nil, err
+
+ for storeKey, configMapKeys := range params {
+ for _, configMapKey := range configMapKeys {
+
+ err = SetConfigMapFromParameter(profile.ParamStore(), cm, storeKey, configMapKey)
+ if err != nil {
+ return nil, err
+ }
+ }
}
// Secret parameters from profile store
- err = SetConfigMapFromSecret(profile.SecretStore(), cm, parameters.APIKey, AgentAPIKey)
- if err != nil {
- return nil, err
- }
- err = SetConfigMapFromSecret(profile.SecretStore(), cm, parameters.APPKey, AgentAPPKey)
- if err != nil {
- return nil, err
+ secretParams := map[parameters.StoreKey][]string{
+ parameters.APIKey: {AgentAPIKey},
+ parameters.APPKey: {AgentAPPKey},
+ parameters.PrivateKeyPassword: {AWSPrivateKeyPassword, AzurePrivateKeyPassword, GCPPrivateKeyPassword},
}
- err = SetConfigMapFromSecret(profile.SecretStore(), cm, parameters.PrivateKeyPassword, AWSPrivateKeyPassword)
- if err != nil {
- return nil, err
+
+ for storeKey, configMapKeys := range secretParams {
+ for _, configMapKey := range configMapKeys {
+ err = SetConfigMapFromSecret(profile.SecretStore(), cm, storeKey, configMapKey)
+ if err != nil {
+ return nil, err
+ }
+ }
}
// Merge with scenario variables
diff --git a/test/new-e2e/pkg/runner/configmap_test.go b/test/new-e2e/pkg/runner/configmap_test.go
index ea2ba6fbd1c4c..f60b91cfef341 100644
--- a/test/new-e2e/pkg/runner/configmap_test.go
+++ b/test/new-e2e/pkg/runner/configmap_test.go
@@ -11,10 +11,11 @@ import (
"encoding/json"
"testing"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
"github.com/pulumi/pulumi/sdk/v3/go/auto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
)
func Test_BuildStackParameters(t *testing.T) {
@@ -35,10 +36,18 @@ func Test_BuildStackParameters(t *testing.T) {
"ddinfra:aws/defaultKeyPairName": auto.ConfigValue{Value: "key_pair_name", Secret: false},
"ddinfra:env": auto.ConfigValue{Value: "", Secret: false},
"ddinfra:extraResourcesTags": auto.ConfigValue{Value: "extra_resources_tags", Secret: false},
+ "ddinfra:initOnly": auto.ConfigValue{Value: "init_only", Secret: false},
"ddinfra:aws/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false},
"ddinfra:aws/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false},
"ddinfra:aws/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true},
+ "ddinfra:az/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false},
+ "ddinfra:az/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false},
+ "ddinfra:az/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true},
+ "ddinfra:gcp/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false},
+ "ddinfra:gcp/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false},
+ "ddinfra:gcp/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true},
"ddagent:pipeline_id": auto.ConfigValue{Value: "pipeline_id", Secret: false},
"ddagent:commit_sha": auto.ConfigValue{Value: "commit_sha", Secret: false},
+ "ddagent:majorVersion": auto.ConfigValue{Value: "major_version", Secret: false},
}, configMap)
}
diff --git a/test/new-e2e/pkg/runner/local_profile.go b/test/new-e2e/pkg/runner/local_profile.go
index 476896f945e87..de08513ae1426 100644
--- a/test/new-e2e/pkg/runner/local_profile.go
+++ b/test/new-e2e/pkg/runner/local_profile.go
@@ -10,15 +10,16 @@ import (
"os"
"os/user"
"path"
- "path/filepath"
"strings"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
)
-const (
- defaultLocalEnvironments string = "aws/agent-sandbox"
-)
+var defaultLocalEnvironments = map[string]string{
+ "aws": "agent-sandbox",
+ "az": "agent-sandbox",
+ "gcp": "agent-sandbox",
+}
// NewLocalProfile creates a new local profile
func NewLocalProfile() (Profile, error) {
@@ -40,10 +41,12 @@ func NewLocalProfile() (Profile, error) {
store = parameters.NewCascadingStore(envValueStore)
}
// inject default params
- environments, err := store.GetWithDefault(parameters.Environments, defaultLocalEnvironments)
+ environments, err := store.GetWithDefault(parameters.Environments, "")
if err != nil {
return nil, err
}
+ environments = mergeEnvironments(environments, defaultLocalEnvironments)
+
outputDir := getLocalOutputDir()
return localProfile{baseProfile: newProfile("e2elocal", strings.Split(environments, " "), store, nil, outputDir)}, nil
}
@@ -115,27 +118,3 @@ func (p localProfile) NamePrefix() string {
func (p localProfile) AllowDevMode() bool {
return true
}
-
-// GetOutputDir extends baseProfile.GetOutputDir to create a symlink to the latest run
-func (p localProfile) GetOutputDir() (string, error) {
- outDir, err := p.baseProfile.GetOutputDir()
- if err != nil {
- return "", err
- }
-
- // Create a symlink to the latest run for user convenience
- latestLink := filepath.Join(filepath.Dir(outDir), "latest")
- // Remove the symlink if it already exists
- if _, err := os.Lstat(latestLink); err == nil {
- err = os.Remove(latestLink)
- if err != nil {
- return "", err
- }
- }
- err = os.Symlink(outDir, latestLink)
- if err != nil {
- return "", err
- }
-
- return outDir, nil
-}
diff --git a/test/new-e2e/pkg/runner/parameters/const.go b/test/new-e2e/pkg/runner/parameters/const.go
index 2f5e4691a7fbe..c23c1502946b6 100644
--- a/test/new-e2e/pkg/runner/parameters/const.go
+++ b/test/new-e2e/pkg/runner/parameters/const.go
@@ -9,42 +9,50 @@ package parameters
type StoreKey string
const (
- // APIKey config file parameter name
+ // APIKey Datadog api key
APIKey StoreKey = "api_key"
- // APPKey config file parameter name
+ // APPKey Datadog app key
APPKey StoreKey = "app_key"
- // Environments config file parameter name
+ // Environments space-separated cloud environments
Environments StoreKey = "env"
- // ExtraResourcesTags config file parameter name
+ // ExtraResourcesTags extra tags to label resources
ExtraResourcesTags StoreKey = "extra_resources_tags"
- // KeyPairName config file parameter name
+ // KeyPairName aws keypairname, used to access EC2 instances
KeyPairName StoreKey = "key_pair_name"
- // PrivateKeyPassword config file parameter name
+ // PrivateKeyPassword private ssh key password
PrivateKeyPassword StoreKey = "private_key_password"
- // PrivateKeyPath config file parameter name
+ // PrivateKeyPath private ssh key path
PrivateKeyPath StoreKey = "private_key_path"
- // Profile config file parameter name
+ // Profile aws profile name
Profile StoreKey = "profile"
- // PublicKeyPath config file parameter name
+ // PublicKeyPath public ssh key path
PublicKeyPath StoreKey = "public_key_path"
// PulumiPassword config file parameter name
PulumiPassword StoreKey = "pulumi_password"
- // SkipDeleteOnFailure config file parameter name
+ // SkipDeleteOnFailure keep the stack on test failure
SkipDeleteOnFailure StoreKey = "skip_delete_on_failure"
- // StackParameters config file parameter name
+ // StackParameters configuration map for the stack, in a json formatted string
StackParameters StoreKey = "stack_params"
- // PipelineID config file parameter name
+ // PipelineID used to deploy agent artifacts from a Gitlab pipeline
PipelineID StoreKey = "pipeline_id"
- // CommitSHA config file parameter name
+ // CommitSHA is used to deploy agent artifacts from a specific commit, needed for docker images
CommitSHA StoreKey = "commit_sha"
- // VerifyCodeSignature config file parameter name
+ // VerifyCodeSignature of the agent
VerifyCodeSignature StoreKey = "verify_code_signature"
- // OutputDir config file parameter name
+ // OutputDir path to store test artifacts
OutputDir StoreKey = "output_dir"
- // PulumiLogLevel config file parameter name
+ // PulumiLogLevel sets the log level for pulumi. Pulumi emits logs at log levels between 1 and 11, with 11 being the most verbose.
PulumiLogLevel StoreKey = "pulumi_log_level"
- // PulumiLogToStdErr config file parameter name
+ // PulumiLogToStdErr specifies that all logs should be sent directly to stderr - making it more accessible and avoiding OS level buffering.
PulumiLogToStdErr StoreKey = "pulumi_log_to_stderr"
- // DevMode config flag parameter name
+ // PulumiVerboseProgressStreams allows specifying one or more io.Writers to redirect incremental update stdout
+ PulumiVerboseProgressStreams StoreKey = "pulumi_verbose_progress_streams"
+ // DevMode allows to keep the stack after the test completes
DevMode StoreKey = "dev_mode"
+ // InitOnly config flag parameter name
+ InitOnly StoreKey = "init_only"
+ // PreInitialized config flag parameter name
+ PreInitialized StoreKey = "pre_initialized"
+ // MajorVersion config flag parameter name
+ MajorVersion StoreKey = "major_version"
)
diff --git a/test/new-e2e/pkg/runner/parameters/store_aws.go b/test/new-e2e/pkg/runner/parameters/store_aws.go
index 233efdd537557..1a9ccfd55228a 100644
--- a/test/new-e2e/pkg/runner/parameters/store_aws.go
+++ b/test/new-e2e/pkg/runner/parameters/store_aws.go
@@ -9,11 +9,13 @@ import (
"context"
"errors"
"fmt"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common"
"strings"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/clients"
"github.com/aws/aws-sdk-go-v2/service/ssm"
ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/clients"
)
var _ valueStore = &awsStore{}
@@ -36,18 +38,27 @@ func (s awsStore) get(key StoreKey) (string, error) {
if err != nil {
return "", err
}
+ if newKey, ok := awsOverrides[key]; ok {
+ key = newKey
+ }
awsKey := strings.ToLower(s.prefix + string(key))
- withDecription := true
- output, err := ssmClient.GetParameter(context.Background(), &ssm.GetParameterInput{Name: &awsKey, WithDecryption: &withDecription})
+ withDecryption := true
+ output, err := ssmClient.GetParameter(context.Background(), &ssm.GetParameterInput{Name: &awsKey, WithDecryption: &withDecryption})
if err != nil {
var notFoundError *ssmTypes.ParameterNotFound
if errors.As(err, ¬FoundError) {
return "", ParameterNotFoundError{key: key}
}
- return "", fmt.Errorf("failed to get SSM parameter '%s', err: %w", awsKey, err)
+ return "", common.InternalError{Err: fmt.Errorf("failed to get SSM parameter '%s', err: %w", awsKey, err)}
}
return *output.Parameter.Value, nil
}
+
+// awsOverrides is a map of StoreKey to StoreKey used to override key only in AWS store
+var awsOverrides = map[StoreKey]StoreKey{
+ APIKey: "api_key_2",
+ APPKey: "app_key_2",
+}
diff --git a/test/new-e2e/pkg/runner/parameters/store_config_file.go b/test/new-e2e/pkg/runner/parameters/store_config_file.go
index 7e775b4d82b6d..e9f20051e3708 100644
--- a/test/new-e2e/pkg/runner/parameters/store_config_file.go
+++ b/test/new-e2e/pkg/runner/parameters/store_config_file.go
@@ -6,7 +6,9 @@
package parameters
import (
+ "fmt"
"os"
+ "strings"
"encoding/json"
@@ -35,6 +37,7 @@ type Config struct {
// ConfigParams instance contains config relayed parameters
type ConfigParams struct {
AWS AWS `yaml:"aws"`
+ Azure Azure `yaml:"azure"`
Agent Agent `yaml:"agent"`
OutputDir string `yaml:"outputDir"`
Pulumi Pulumi `yaml:"pulumi"`
@@ -51,6 +54,14 @@ type AWS struct {
TeamTag string `yaml:"teamTag"`
}
+// Azure instance contains Azure related parameters
+type Azure struct {
+ Account string `yaml:"account"`
+ PublicKeyPath string `yaml:"publicKeyPath"`
+ PrivateKeyPath string `yaml:"privateKeyPath"`
+ PrivateKeyPassword string `yaml:"privateKeyPassword"`
+}
+
// Agent instance contains agent related parameters
type Agent struct {
APIKey string `yaml:"apiKey"`
@@ -68,6 +79,9 @@ type Pulumi struct {
// Set this option to true to log to stderr instead.
// https://www.pulumi.com/docs/support/troubleshooting/#verbose-logging
LogToStdErr string `yaml:"logToStdErr"`
+ // To reduce logs noise in the CI, by default we display only the Pulumi error progress steam.
+ // Set this option to true to display all the progress streams.
+ VerboseProgressStreams string `yaml:"verboseProgressStreams"`
}
var _ valueStore = &configFileValueStore{}
@@ -129,14 +143,19 @@ func (s configFileValueStore) get(key StoreKey) (string, error) {
value = s.config.ConfigParams.AWS.PrivateKeyPassword
case StackParameters:
value = s.stackParamsJSON
- case Environments:
- if s.config.ConfigParams.AWS.Account != "" {
- value = "aws/" + s.config.ConfigParams.AWS.Account
- }
case ExtraResourcesTags:
if s.config.ConfigParams.AWS.TeamTag != "" {
value = "team:" + s.config.ConfigParams.AWS.TeamTag
}
+ case Environments:
+ if s.config.ConfigParams.AWS.Account != "" {
+ value = value + fmt.Sprintf("aws/%s ", s.config.ConfigParams.AWS.Account)
+ }
+ if s.config.ConfigParams.Azure.Account != "" {
+ value = value + fmt.Sprintf("az/%s ", s.config.ConfigParams.Azure.Account)
+ }
+ value = strings.TrimSpace(value)
+
case VerifyCodeSignature:
value = s.config.ConfigParams.Agent.VerifyCodeSignature
case OutputDir:
@@ -145,6 +164,8 @@ func (s configFileValueStore) get(key StoreKey) (string, error) {
value = s.config.ConfigParams.Pulumi.LogLevel
case PulumiLogToStdErr:
value = s.config.ConfigParams.Pulumi.LogToStdErr
+ case PulumiVerboseProgressStreams:
+ value = s.config.ConfigParams.Pulumi.VerboseProgressStreams
case DevMode:
value = s.config.ConfigParams.DevMode
}
diff --git a/test/new-e2e/pkg/runner/parameters/store_config_file_test.go b/test/new-e2e/pkg/runner/parameters/store_config_file_test.go
index 449743a3203c1..68826e44f3b67 100644
--- a/test/new-e2e/pkg/runner/parameters/store_config_file_test.go
+++ b/test/new-e2e/pkg/runner/parameters/store_config_file_test.go
@@ -58,7 +58,7 @@ func Test_NewConfigFileStore(t *testing.T) {
value, err = store.Get(Environments)
assert.NoError(t, err)
- assert.Equal(t, "aws/kiki", value)
+ assert.Equal(t, "aws/kiki az/tata", value)
value, err = store.Get(APIKey)
assert.NoError(t, err)
diff --git a/test/new-e2e/pkg/runner/parameters/store_env.go b/test/new-e2e/pkg/runner/parameters/store_env.go
index 280f2285e9918..3c7f1e62f3b13 100644
--- a/test/new-e2e/pkg/runner/parameters/store_env.go
+++ b/test/new-e2e/pkg/runner/parameters/store_env.go
@@ -6,12 +6,36 @@
package parameters
import (
+ "fmt"
"os"
"strings"
)
var _ valueStore = &envValueStore{}
+var envVariablesByStoreKey = map[StoreKey]string{
+ APIKey: "E2E_API_KEY",
+ APPKey: "E2E_APP_KEY",
+ Environments: "E2E_ENVIRONMENTS",
+ ExtraResourcesTags: "E2E_EXTRA_RESOURCES_TAGS",
+ KeyPairName: "E2E_KEY_PAIR_NAME",
+ PrivateKeyPassword: "E2E_PRIVATE_KEY_PASSWORD",
+ PrivateKeyPath: "E2E_PRIVATE_KEY_PATH",
+ Profile: "E2E_PROFILE",
+ PublicKeyPath: "E2E_PUBLIC_KEY_PATH",
+ PulumiPassword: "E2E_PULUMI_PASSWORD",
+ SkipDeleteOnFailure: "E2E_SKIP_DELETE_ON_FAILURE",
+ StackParameters: "E2E_STACK_PARAMS",
+ PipelineID: "E2E_PIPELINE_ID",
+ CommitSHA: "E2E_COMMIT_SHA",
+ VerifyCodeSignature: "E2E_VERIFY_CODE_SIGNATURE",
+ OutputDir: "E2E_OUTPUT_DIR",
+ PulumiLogLevel: "E2E_PULUMI_LOG_LEVEL",
+ PulumiLogToStdErr: "E2E_PULUMI_LOG_TO_STDERR",
+ PulumiVerboseProgressStreams: "E2E_PULUMI_VERBOSE_PROGRESS_STREAMS",
+ DevMode: "E2E_DEV_MODE",
+}
+
type envValueStore struct {
prefix string
}
@@ -30,7 +54,11 @@ func newEnvValueStore(prefix string) envValueStore {
// Get returns parameter value.
// For env Store, the key is upper cased and added to prefix
func (s envValueStore) get(key StoreKey) (string, error) {
- envValueStoreKey := strings.ToUpper(s.prefix + string(key))
+ envValueStoreKey := envVariablesByStoreKey[key]
+ if envValueStoreKey == "" {
+ fmt.Printf("key [%s] not found in envValueStoreKey, converting to `strings.ToUpper(E2E_)`\n", key)
+ envValueStoreKey = strings.ToUpper(s.prefix + string(key))
+ }
val, found := os.LookupEnv(strings.ToUpper(envValueStoreKey))
if !found {
return "", ParameterNotFoundError{key: key}
diff --git a/test/new-e2e/pkg/runner/parameters/testfixtures/test_config_with_stackparams.yaml b/test/new-e2e/pkg/runner/parameters/testfixtures/test_config_with_stackparams.yaml
index 91bcd76ddf801..44d7d2ab7a039 100644
--- a/test/new-e2e/pkg/runner/parameters/testfixtures/test_config_with_stackparams.yaml
+++ b/test/new-e2e/pkg/runner/parameters/testfixtures/test_config_with_stackparams.yaml
@@ -4,6 +4,8 @@ configParams:
keyPairName: "totoro"
publicKeyPath: "/Users/totoro/.ssh/id_rsa.pub"
teamTag: "miyazaki"
+ azure:
+ account: "tata"
agent:
apiKey: "00000000000000000000000000000000"
options:
diff --git a/test/new-e2e/pkg/runner/profile.go b/test/new-e2e/pkg/runner/profile.go
index 3d707b214e145..a4048093c2d10 100644
--- a/test/new-e2e/pkg/runner/profile.go
+++ b/test/new-e2e/pkg/runner/profile.go
@@ -9,17 +9,15 @@ import (
"fmt"
"hash/fnv"
"io"
+ "maps"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
- "time"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
-
- "testing"
)
// CloudProvider alias to string
@@ -54,7 +52,7 @@ type Profile interface {
// Since one Workspace supports one single program and we have one program per stack,
// the path should be unique for each stack.
GetWorkspacePath(stackName string) string
- // ParamStore() returns the normal parameter store
+ // ParamStore returns the normal parameter store
ParamStore() parameters.Store
// SecretStore returns the secure parameter store
SecretStore() parameters.Store
@@ -63,9 +61,9 @@ type Profile interface {
// AllowDevMode returns if DevMode is allowed
AllowDevMode() bool
// GetOutputDir returns the root output directory for tests to store output files and artifacts.
- // e.g. /tmp/e2e-output/2020-01-01_00-00-00_
+ // e.g. /tmp/e2e-output/ or ~/e2e-output/
//
- // See GetTestOutputDir for a function that returns a subdirectory for a specific test.
+ // It is recommended to use GetTestOutputDir to create a subdirectory for a specific test.
GetOutputDir() (string, error)
}
@@ -77,7 +75,6 @@ type baseProfile struct {
secretStore parameters.Store
workspaceRootFolder string
defaultOutputRootFolder string
- outputRootFolder string
}
func newProfile(projectName string, environments []string, store parameters.Store, secretStore *parameters.Store, defaultOutputRoot string) baseProfile {
@@ -98,6 +95,27 @@ func newProfile(projectName string, environments []string, store parameters.Stor
return p
}
+// mergeEnvironments returns a string with a space separated list of available environments. It merges environments with a `defaultEnvironments` map
+func mergeEnvironments(environments string, defaultEnvironments map[string]string) string {
+ environmentsList := strings.Split(environments, " ")
+ // set merged map capacity to worst case scenario of no overlapping key
+ mergedEnvironmentsMap := make(map[string]string, len(defaultEnvironments)+len(environmentsList))
+ maps.Copy(mergedEnvironmentsMap, defaultEnvironments)
+ for _, env := range environmentsList {
+ parts := strings.Split(env, "/")
+ if len(parts) == 2 {
+ mergedEnvironmentsMap[parts[0]] = parts[1]
+ }
+ }
+
+ mergedEnvironmentsList := make([]string, 0, len(mergedEnvironmentsMap))
+ for k, v := range mergedEnvironmentsMap {
+ mergedEnvironmentsList = append(mergedEnvironmentsList, fmt.Sprintf("%s/%s", k, v))
+ }
+
+ return strings.Join(mergedEnvironmentsList, " ")
+}
+
// EnvironmentNames returns a comma-separated list of environments that the profile targets
func (p baseProfile) EnvironmentNames() string {
return strings.Join(p.environments, envSep)
@@ -118,55 +136,30 @@ func (p baseProfile) SecretStore() parameters.Store {
return p.secretStore
}
-// GetOutputDir returns the root output directory for tests to store output files and artifacts.
-// The directory is created on the first call to this function, normally this will be when a
-// test calls GetTestOutputDir.
+// GetOutputDir returns the root output directory to be used to store output files and artifacts.
+// A path is returned but the directory is not created.
//
// The root output directory is chosen in the following order:
// - outputDir parameter from the runner configuration, or E2E_OUTPUT_DIR environment variable
-// - default provided by a parent profile, /e2e-output, e.g. $CI_PROJECT_DIR/e2e-output
+// - default provided by profile, /e2e-output, e.g. $CI_PROJECT_DIR/e2e-output
// - os.TempDir()/e2e-output
//
-// A timestamp is appended to the root output directory to distinguish between multiple runs,
-// and os.MkdirTemp() is used to avoid name collisions between parallel runs.
-//
// See GetTestOutputDir for a function that returns a subdirectory for a specific test.
func (p baseProfile) GetOutputDir() (string, error) {
- if p.outputRootFolder == "" {
- var outputRoot string
- configOutputRoot, err := p.store.GetWithDefault(parameters.OutputDir, "")
- if err != nil {
- return "", err
- }
- if configOutputRoot != "" {
- // If outputRoot is provided in the config file, use it as the root directory
- outputRoot = configOutputRoot
- } else if p.defaultOutputRootFolder != "" {
- // If a default outputRoot was provided, use it as the root directory
- outputRoot = filepath.Join(p.defaultOutputRootFolder, "e2e-output")
- } else if outputRoot == "" {
- // If outputRoot is not provided, use os.TempDir() as the root directory
- outputRoot = filepath.Join(os.TempDir(), "e2e-output")
- }
- // Append timestamp to distinguish between multiple runs
- // Format: YYYY-MM-DD_HH-MM-SS
- // Use a custom timestamp format because Windows paths can't contain ':' characters
- // and we don't need the timezone information.
- timePart := time.Now().Format("2006-01-02_15-04-05")
- // create root directory
- err = os.MkdirAll(outputRoot, 0755)
- if err != nil {
- return "", err
- }
- // Create final output directory
- // Use MkdirTemp to avoid name collisions between parallel runs
- outputRoot, err = os.MkdirTemp(outputRoot, fmt.Sprintf("%s_*", timePart))
- if err != nil {
- return "", err
- }
- p.outputRootFolder = outputRoot
+ configOutputRoot, err := p.store.GetWithDefault(parameters.OutputDir, "")
+ if err != nil {
+ return "", err
}
- return p.outputRootFolder, nil
+ if configOutputRoot != "" {
+ // If outputRoot is provided in the config file, use it as the root directory
+ return configOutputRoot, nil
+ }
+ if p.defaultOutputRootFolder != "" {
+ // If a default outputRoot was provided, use it as the root directory
+ return filepath.Join(p.defaultOutputRootFolder, "e2e-output"), nil
+ }
+ // as a final fallback, use os.TempDir() as the root directory
+ return filepath.Join(os.TempDir(), "e2e-output"), nil
}
// GetWorkspacePath returns the directory for CI Pulumi workspace.
@@ -200,21 +193,3 @@ func GetProfile() Profile {
return runProfile
}
-
-// GetTestOutputDir returns the output directory for a specific test.
-// The test name is sanitized to remove invalid characters, and the output directory is created.
-func GetTestOutputDir(p Profile, t *testing.T) (string, error) {
- // https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
- invalidPathChars := strings.Join([]string{"?", "%", "*", ":", "|", "\"", "<", ">", ".", ",", ";", "="}, "")
- root, err := p.GetOutputDir()
- if err != nil {
- return "", err
- }
- testPart := strings.ReplaceAll(t.Name(), invalidPathChars, "_")
- path := filepath.Join(root, testPart)
- err = os.MkdirAll(path, 0755)
- if err != nil {
- return "", err
- }
- return path, nil
-}
diff --git a/test/new-e2e/pkg/runner/profile_test.go b/test/new-e2e/pkg/runner/profile_test.go
index fca983645d795..9fa3f51e62984 100644
--- a/test/new-e2e/pkg/runner/profile_test.go
+++ b/test/new-e2e/pkg/runner/profile_test.go
@@ -8,6 +8,8 @@
package runner
import (
+ "slices"
+ "strings"
"testing"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
@@ -42,3 +44,45 @@ func TestGetWorkspacePath(t *testing.T) {
})
}
}
+
+func TestDefaultEnvironments(t *testing.T) {
+ type args struct {
+ environments string
+ defaultEnvironments map[string]string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []string
+ }{
+ {
+ name: "default",
+ args: args{environments: "", defaultEnvironments: map[string]string{"aws": "agent-sandbox", "az": "agent-sandbox"}},
+ want: []string{"aws/agent-sandbox", "az/agent-sandbox"},
+ },
+ {
+ name: "override",
+ args: args{environments: "aws/agent-qa", defaultEnvironments: map[string]string{"aws": "agent-sandbox", "az": "agent-sandbox"}},
+ want: []string{"aws/agent-qa", "az/agent-sandbox"},
+ },
+ {
+ name: "override with extra",
+ args: args{environments: "aws/agent-sandbox gcp/agent-sandbox", defaultEnvironments: map[string]string{"aws": "agent-sandbox", "az": "agent-sandbox"}},
+ want: []string{"aws/agent-sandbox", "gcp/agent-sandbox", "az/agent-sandbox"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := mergeEnvironments(tt.args.environments, tt.args.defaultEnvironments)
+ gotList := strings.Split(got, " ")
+ if len(gotList) != len(tt.want) {
+ t.Errorf("mergeEnvironments() = %v, want %v", got, tt.want)
+ }
+ for _, v := range gotList {
+ if !slices.Contains(tt.want, v) {
+ t.Errorf("mergeEnvironments() = %v, want %v", got, tt.want)
+ }
+ }
+ })
+ }
+}
diff --git a/test/new-e2e/pkg/utils/clients/aws.go b/test/new-e2e/pkg/utils/clients/aws.go
index c234c69695ceb..560e1296605e2 100644
--- a/test/new-e2e/pkg/utils/clients/aws.go
+++ b/test/new-e2e/pkg/utils/clients/aws.go
@@ -7,6 +7,7 @@ package clients
import (
"context"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
"sync"
"time"
@@ -51,7 +52,10 @@ func getAWSConfig() (*aws.Config, error) {
ctx, cancel := context.WithTimeout(context.Background(), awsTimeout)
defer cancel()
- cfg, err := awsconfig.LoadDefaultConfig(ctx)
+ // https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/retries-timeouts/
+ cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRetryer(func() aws.Retryer {
+ return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
+ }))
if err != nil {
return nil, err
}
diff --git a/test/new-e2e/pkg/utils/clients/ssh.go b/test/new-e2e/pkg/utils/clients/ssh.go
deleted file mode 100644
index 7effffa089432..0000000000000
--- a/test/new-e2e/pkg/utils/clients/ssh.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016-present Datadog, Inc.
-
-package clients
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/fs"
- "net"
- "os"
- "path"
- "time"
-
- "github.com/cenkalti/backoff"
- "github.com/pkg/sftp"
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/agent"
-)
-
-// GetSSHClient returns an ssh Client for the specified host
-func GetSSHClient(user, host string, privateKey, privateKeyPassphrase []byte, retryInterval time.Duration, maxRetries uint64) (client *ssh.Client, err error) {
- err = backoff.Retry(func() error {
- client, err = getSSHClient(user, host, privateKey, privateKeyPassphrase)
- return err
- }, backoff.WithMaxRetries(backoff.NewConstantBackOff(retryInterval), maxRetries))
-
- return
-}
-
-func getSSHClient(user, host string, privateKey, privateKeyPassphrase []byte) (*ssh.Client, error) {
- var auth ssh.AuthMethod
-
- if len(privateKey) > 0 {
- var privateKeyAuth ssh.Signer
- var err error
-
- if len(privateKeyPassphrase) > 0 {
- privateKeyAuth, err = ssh.ParsePrivateKeyWithPassphrase(privateKey, privateKeyPassphrase)
- } else {
- privateKeyAuth, err = ssh.ParsePrivateKey(privateKey)
- }
-
- if err != nil {
- return nil, err
- }
- auth = ssh.PublicKeys(privateKeyAuth)
- } else {
- // Use the ssh agent
- conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
- if err != nil {
- return nil, fmt.Errorf("no ssh key provided and cannot connect to the ssh agent: %v", err)
- }
- defer conn.Close()
- sshAgent := agent.NewClient(conn)
- auth = ssh.PublicKeysCallback(sshAgent.Signers)
- }
-
- sshConfig := &ssh.ClientConfig{
- User: user,
- Auth: []ssh.AuthMethod{auth},
- HostKeyCallback: ssh.InsecureIgnoreHostKey(),
- }
-
- client, err := ssh.Dial("tcp", host, sshConfig)
- if err != nil {
- return nil, err
- }
-
- session, err := client.NewSession()
- if err != nil {
- client.Close()
- return nil, err
- }
- err = session.Close()
- if err != nil {
- return nil, err
- }
-
- return client, nil
-}
-
-// ExecuteCommand creates a session on an ssh client and runs a command.
-// It returns the command output and errors
-func ExecuteCommand(client *ssh.Client, command string) (string, error) {
- session, err := client.NewSession()
- if err != nil {
- return "", fmt.Errorf("failed to create session: %v", err)
- }
-
- stdout, err := session.CombinedOutput(command)
-
- return string(stdout), err
-}
-
-// CopyFile create a sftp session and copy a single file to the remote host through SSH
-func CopyFile(client *ssh.Client, src string, dst string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- return copyFile(sftpClient, src, dst)
-}
-
-// CopyFolder create a sftp session and copy a folder to remote host through SSH
-func CopyFolder(client *ssh.Client, srcFolder string, dstFolder string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- return copyFolder(sftpClient, srcFolder, dstFolder)
-}
-
-// GetFile create a sftp session and copy a single file from the remote host through SSH
-func GetFile(client *ssh.Client, src string, dst string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- // remote
- fsrc, err := sftpClient.Open(src)
- if err != nil {
- return err
- }
- defer fsrc.Close()
-
- // local
- fdst, err := os.Create(dst)
- if err != nil {
- return err
- }
- defer fdst.Close()
-
- _, err = fsrc.WriteTo(fdst)
- return err
-}
-
-func copyFolder(sftpClient *sftp.Client, srcFolder string, dstFolder string) error {
- folderContent, err := os.ReadDir(srcFolder)
- if err != nil {
- return err
- }
-
- if err := sftpClient.MkdirAll(dstFolder); err != nil {
- return err
- }
-
- for _, d := range folderContent {
- if !d.IsDir() {
- err := copyFile(sftpClient, path.Join(srcFolder, d.Name()), path.Join(dstFolder, d.Name()))
- if err != nil {
- return err
- }
- } else {
- err = copyFolder(sftpClient, path.Join(srcFolder, d.Name()), path.Join(dstFolder, d.Name()))
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func copyFile(sftpClient *sftp.Client, src string, dst string) error {
- srcFile, err := os.Open(src)
- if err != nil {
- return err
- }
- defer srcFile.Close()
-
- dstFile, err := sftpClient.Create(dst)
- if err != nil {
- return err
- }
- defer dstFile.Close()
-
- if _, err := dstFile.ReadFrom(srcFile); err != nil {
- return err
- }
- return nil
-}
-
-// FileExists create a sftp session to and returns true if the file exists and is a regular file
-func FileExists(client *ssh.Client, path string) (bool, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return false, err
- }
- defer sftpClient.Close()
-
- info, err := sftpClient.Lstat(path)
- if err != nil {
- if errors.Is(err, fs.ErrNotExist) {
- return false, nil
- }
- return false, err
- }
-
- return info.Mode().IsRegular(), nil
-}
-
-// ReadFile reads the content of the file, return bytes read and error if any
-func ReadFile(client *ssh.Client, path string) ([]byte, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return nil, err
- }
- defer sftpClient.Close()
-
- f, err := sftpClient.Open(path)
- if err != nil {
- return nil, err
- }
-
- var content bytes.Buffer
- _, err = io.Copy(&content, f)
- if err != nil {
- return content.Bytes(), err
- }
-
- return content.Bytes(), nil
-}
-
-// WriteFile write content to the file and returns the number of bytes written and error if any
-func WriteFile(client *ssh.Client, path string, content []byte) (int64, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return 0, err
- }
- defer sftpClient.Close()
-
- f, err := sftpClient.Create(path)
- if err != nil {
- return 0, err
- }
- defer f.Close()
-
- reader := bytes.NewReader(content)
- return io.Copy(f, reader)
-}
-
-// AppendFile append content to the file and returns the number of bytes appened and error if any
-func AppendFile(client *ssh.Client, os, path string, content []byte) (int64, error) {
- if os == "linux" {
- return appendWithSudo(client, path, content)
- }
- return appendWithSftp(client, path, content)
-
-}
-
-// appendWithSudo appends content to the file using sudo tee for Linux environment
-func appendWithSudo(client *ssh.Client, path string, content []byte) (int64, error) {
- cmd := fmt.Sprintf("echo '%s' | sudo tee -a %s", string(content), path)
- session, err := client.NewSession()
- if err != nil {
- return 0, err
- }
- defer session.Close()
-
- var b bytes.Buffer
- session.Stdout = &b
- if err := session.Run(cmd); err != nil {
- return 0, err
- }
-
- return int64(len(content)), nil
-}
-
-// appendWithSftp appends content to the file using sftp for Windows environment
-func appendWithSftp(client *ssh.Client, path string, content []byte) (int64, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return 0, err
- }
- defer sftpClient.Close()
-
- // Open the file in append mode and create it if it doesn't exist
- f, err := sftpClient.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY)
- if err != nil {
- return 0, err
- }
- defer f.Close()
-
- reader := bytes.NewReader(content)
- written, err := io.Copy(f, reader)
- if err != nil {
- return 0, err
- }
-
- return written, nil
-}
-
-// ReadDir returns list of directory entries in path
-func ReadDir(client *ssh.Client, path string) ([]fs.DirEntry, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return nil, err
- }
- defer sftpClient.Close()
-
- infos, err := sftpClient.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- entries := make([]fs.DirEntry, 0, len(infos))
- for _, info := range infos {
- entry := fs.FileInfoToDirEntry(info)
- entries = append(entries, entry)
- }
-
- return entries, nil
-}
-
-// Lstat returns a FileInfo structure describing path.
-// if path is a symbolic link, the FileInfo structure describes the symbolic link.
-func Lstat(client *ssh.Client, path string) (fs.FileInfo, error) {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return nil, err
- }
- defer sftpClient.Close()
-
- return sftpClient.Lstat(path)
-}
-
-// MkdirAll creates the specified directory along with any necessary parents.
-// If the path is already a directory, does nothing and returns nil.
-// Otherwise returns an error if any.
-func MkdirAll(client *ssh.Client, path string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- return sftpClient.MkdirAll(path)
-}
-
-// Remove removes the specified file or directory.
-// Returns an error if file or directory does not exist, or if the directory is not empty.
-func Remove(client *ssh.Client, path string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- return sftpClient.Remove(path)
-}
-
-// RemoveAll recursively removes all files/folders in the specified directory.
-// Returns an error if the directory does not exist.
-func RemoveAll(client *ssh.Client, path string) error {
- sftpClient, err := sftp.NewClient(client)
- if err != nil {
- return err
- }
- defer sftpClient.Close()
-
- return sftpClient.RemoveAll(path)
-}
diff --git a/test/new-e2e/pkg/utils/common/internal_error.go b/test/new-e2e/pkg/utils/common/internal_error.go
new file mode 100644
index 0000000000000..e04881515b363
--- /dev/null
+++ b/test/new-e2e/pkg/utils/common/internal_error.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package common implements utilities shared across the e2e tests
+package common
+
+import "fmt"
+
+// InternalError is an error type used to wrap internal errors
+type InternalError struct {
+ Err error
+}
+
+// Error returns a printable InternalError
+func (i InternalError) Error() string {
+ return fmt.Sprintf("E2E INTERNAL ERROR: %v", i.Err)
+}
+
+// Is returns true if the target error is an InternalError
+func (i InternalError) Is(target error) bool {
+ _, ok := target.(InternalError)
+ return ok
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_client.go b/test/new-e2e/pkg/utils/e2e/client/agent_client.go
index 0752fd38e455b..1a362ac6e4f32 100644
--- a/test/new-e2e/pkg/utils/e2e/client/agent_client.go
+++ b/test/new-e2e/pkg/utils/e2e/client/agent_client.go
@@ -6,11 +6,22 @@
package client
import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "strings"
"testing"
"time"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+ osComp "github.com/DataDog/test-infra-definitions/components/os"
+ "github.com/DataDog/test-infra-definitions/components/remote"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
)
const (
@@ -18,23 +29,57 @@ const (
)
// NewHostAgentClient creates an Agent client for host install
-func NewHostAgentClient(t *testing.T, host *components.RemoteHost, waitForAgentReady bool) (agentclient.Agent, error) {
- commandRunner := newAgentCommandRunner(t, newAgentHostExecutor(host))
+func NewHostAgentClient(context e2e.Context, hostOutput remote.HostOutput, waitForAgentReady bool) (agentclient.Agent, error) {
+ params := agentclientparams.NewParams(hostOutput.OSFamily)
+ params.ShouldWaitForReady = waitForAgentReady
- if waitForAgentReady {
- if err := commandRunner.waitForReadyTimeout(agentReadyTimeout); err != nil {
+ host, err := NewHost(context, hostOutput)
+ if err != nil {
+ return nil, err
+ }
+
+ ae := newAgentHostExecutor(hostOutput.OSFamily, host, params)
+ commandRunner := newAgentCommandRunner(context.T(), ae)
+
+ if params.ShouldWaitForReady {
+ if err := waitForReadyTimeout(context.T(), host, commandRunner, agentReadyTimeout); err != nil {
+ return nil, err
+ }
+ }
+
+ return commandRunner, nil
+}
+
+// NewHostAgentClientWithParams creates an Agent client for host install with custom parameters
+func NewHostAgentClientWithParams(context e2e.Context, hostOutput remote.HostOutput, options ...agentclientparams.Option) (agentclient.Agent, error) {
+ params := agentclientparams.NewParams(hostOutput.OSFamily, options...)
+
+ host, err := NewHost(context, hostOutput)
+ if err != nil {
+ return nil, err
+ }
+
+ ae := newAgentHostExecutor(hostOutput.OSFamily, host, params)
+ commandRunner := newAgentCommandRunner(context.T(), ae)
+
+ if params.ShouldWaitForReady {
+ if err := waitForReadyTimeout(context.T(), host, commandRunner, agentReadyTimeout); err != nil {
return nil, err
}
}
+ waitForAgentsReady(context.T(), host, params)
+
return commandRunner, nil
}
// NewDockerAgentClient creates an Agent client for a Docker install
-func NewDockerAgentClient(t *testing.T, docker *Docker, agentContainerName string, waitForAgentReady bool) (agentclient.Agent, error) {
- commandRunner := newAgentCommandRunner(t, newAgentDockerExecutor(docker, agentContainerName))
+func NewDockerAgentClient(context e2e.Context, dockerAgentOutput agent.DockerAgentOutput, options ...agentclientparams.Option) (agentclient.Agent, error) {
+ params := agentclientparams.NewParams(dockerAgentOutput.DockerManager.Host.OSFamily, options...)
+ ae := newAgentDockerExecutor(context, dockerAgentOutput)
+ commandRunner := newAgentCommandRunner(context.T(), ae)
- if waitForAgentReady {
+ if params.ShouldWaitForReady {
if err := commandRunner.waitForReadyTimeout(agentReadyTimeout); err != nil {
return nil, err
}
@@ -42,3 +87,191 @@ func NewDockerAgentClient(t *testing.T, docker *Docker, agentContainerName strin
return commandRunner, nil
}
+
+// waitForAgentsReady waits for the given non-core agents to be ready.
+// The given options configure which Agents to wait for, and how long to wait.
+//
+// Under the hood, this function checks the readiness of the agents by querying their status endpoints.
+// The function will wait until all agents are ready, or until the timeout is reached.
+// If the timeout is reached, an error is returned.
+//
+// As of now this is only implemented for Linux.
+func waitForAgentsReady(tt *testing.T, host *Host, params *agentclientparams.Params) {
+ hostHTTPClient := host.NewHTTPClient()
+ require.EventuallyWithT(tt, func(t *assert.CollectT) {
+ agentReadyCmds := map[string]func(*agentclientparams.Params, *Host) (*http.Request, bool, error){
+ "process-agent": processAgentRequest,
+ "trace-agent": traceAgentRequest,
+ "security-agent": securityAgentRequest,
+ }
+
+ for name, getReadyRequest := range agentReadyCmds {
+ req, ok, err := getReadyRequest(params, host)
+ if !assert.NoErrorf(t, err, "could not build ready command for %s", name) {
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ tt.Logf("Checking if %s is ready...", name)
+ resp, err := hostHTTPClient.Do(req)
+ if assert.NoErrorf(t, err, "%s did not become ready", name) {
+ assert.Less(t, resp.StatusCode, 400)
+ resp.Body.Close()
+ }
+ }
+ }, params.WaitForDuration, params.WaitForTick)
+}
+
+func processAgentRequest(params *agentclientparams.Params, host *Host) (*http.Request, bool, error) {
+ return makeStatusEndpointRequest(params, host, "http://localhost:%d/agent/status", params.ProcessAgentPort)
+}
+
+func traceAgentRequest(params *agentclientparams.Params, host *Host) (*http.Request, bool, error) {
+ return makeStatusEndpointRequest(params, host, "http://localhost:%d/info", params.TraceAgentPort)
+}
+
+func securityAgentRequest(params *agentclientparams.Params, host *Host) (*http.Request, bool, error) {
+ return makeStatusEndpointRequest(params, host, "https://localhost:%d/agent/status", params.SecurityAgentPort)
+}
+
+func makeStatusEndpointRequest(params *agentclientparams.Params, host *Host, url string, port int) (*http.Request, bool, error) {
+ if port == 0 {
+ return nil, false, nil
+ }
+
+ // we want to fetch the auth token only if we actually need it
+ if err := ensureAuthToken(params, host); err != nil {
+ return nil, true, err
+ }
+
+ statusEndpoint := fmt.Sprintf(url, port)
+ req, err := http.NewRequest(http.MethodGet, statusEndpoint, nil)
+ if err != nil {
+ return nil, true, err
+ }
+
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", params.AuthToken))
+ return req, true, nil
+}
+
+func ensureAuthToken(params *agentclientparams.Params, host *Host) error {
+ if params.AuthToken != "" {
+ return nil
+ }
+
+ getAuthTokenCmd := fetchAuthTokenCommand(params.AuthTokenPath, host.osFamily)
+ authToken, err := host.Execute(getAuthTokenCmd)
+ if err != nil {
+ return fmt.Errorf("could not read auth token file: %v", err)
+ }
+ params.AuthToken = strings.TrimSpace(authToken)
+
+ return nil
+}
+
+func fetchAuthTokenCommand(authTokenPath string, osFamily osComp.Family) string {
+ if osFamily == osComp.WindowsFamily {
+ return fmt.Sprintf("Get-Content -Raw -Path %s", authTokenPath)
+ }
+
+ return fmt.Sprintf("sudo cat %s", authTokenPath)
+}
+
+func waitForReadyTimeout(t *testing.T, host *Host, commandRunner *agentCommandRunner, timeout time.Duration) error {
+ err := commandRunner.waitForReadyTimeout(timeout)
+
+ if err != nil {
+ // Propagate the original error if we have another error here
+ localErr := generateAndDownloadFlare(t, commandRunner, host)
+
+ if localErr != nil {
+ t.Errorf("Could not generate and get a flare: %v", localErr)
+ }
+ }
+
+ return err
+}
+
+func generateAndDownloadFlare(t *testing.T, commandRunner *agentCommandRunner, host *Host) error {
+ root, err := e2e.CreateRootOutputDir()
+ if err != nil {
+ return fmt.Errorf("could not get root output directory: %w", err)
+ }
+ outputDir, err := e2e.CreateTestOutputDir(root, t)
+ if err != nil {
+ return fmt.Errorf("could not get output directory: %w", err)
+ }
+ flareFound := false
+
+ _, err = commandRunner.FlareWithError(agentclient.WithArgs([]string{"--email", "e2e@test.com", "--send", "--local"}))
+ if err != nil {
+ t.Errorf("Error while generating the flare: %v.", err)
+ // Do not return now, the flare may be generated locally but was not uploaded because there's no fake intake
+ }
+
+ flareRegex, err := regexp.Compile(`datadog-agent-.*\.zip`)
+ if err != nil {
+ return fmt.Errorf("could not compile regex: %w", err)
+ }
+
+ tmpFolder, err := host.GetTmpFolder()
+ if err != nil {
+ return fmt.Errorf("could not get tmp folder: %w", err)
+ }
+
+ entries, err := host.ReadDir(tmpFolder)
+ if err != nil {
+ return fmt.Errorf("could not read directory: %w", err)
+ }
+
+ for _, entry := range entries {
+ if flareRegex.MatchString(entry.Name()) {
+ t.Logf("Found flare file: %s", entry.Name())
+
+ if host.osFamily != osComp.WindowsFamily {
+ _, err = host.Execute(fmt.Sprintf("sudo chmod 744 %s/%s", tmpFolder, entry.Name()))
+ if err != nil {
+ return fmt.Errorf("could not update permission of flare file %s/%s : %w", tmpFolder, entry.Name(), err)
+ }
+ }
+
+ t.Logf("Downloading flare file in: %s", outputDir)
+ err = host.GetFile(fmt.Sprintf("%s/%s", tmpFolder, entry.Name()), fmt.Sprintf("%s/%s", outputDir, entry.Name()))
+
+ if err != nil {
+ return fmt.Errorf("could not download flare file from %s/%s : %w", tmpFolder, entry.Name(), err)
+ }
+
+ flareFound = true
+ }
+ }
+
+ if !flareFound {
+ t.Errorf("Could not find a flare. Retrieving logs directly instead...")
+
+ logsFolder, err := host.GetLogsFolder()
+ if err != nil {
+ return fmt.Errorf("could not get logs folder: %w", err)
+ }
+
+ entries, err = host.ReadDir(logsFolder)
+
+ if err != nil {
+ return fmt.Errorf("could not read directory: %w", err)
+ }
+
+ for _, entry := range entries {
+ t.Logf("Found log file: %s. Downloading file in: %s", entry.Name(), outputDir)
+
+ err = host.GetFile(fmt.Sprintf("%s/%s", logsFolder, entry.Name()), fmt.Sprintf("%s/%s", outputDir, entry.Name()))
+ if err != nil {
+ return fmt.Errorf("could not download log file from %s/%s : %w", logsFolder, entry.Name(), err)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_commands.go b/test/new-e2e/pkg/utils/e2e/client/agent_commands.go
index fa1f9f0f06b3f..d20813afe89b2 100644
--- a/test/new-e2e/pkg/utils/e2e/client/agent_commands.go
+++ b/test/new-e2e/pkg/utils/e2e/client/agent_commands.go
@@ -111,6 +111,15 @@ func (agent *agentCommandRunner) Flare(commandArgs ...agentclient.AgentArgsOptio
return agent.executeCommand("flare", commandArgs...)
}
+// FlareWithError runs flare command and returns the output or an error. You should use the FakeIntake client to fetch the flare archive
+func (agent *agentCommandRunner) FlareWithError(commandArgs ...agentclient.AgentArgsOption) (string, error) {
+ args, err := optional.MakeParams(commandArgs...)
+ require.NoError(agent.t, err)
+
+ arguments := append([]string{"flare"}, args.Args...)
+ return agent.executor.execute(arguments)
+}
+
// Health runs health command and returns the runtime agent health
func (agent *agentCommandRunner) Health() (string, error) {
arguments := []string{"health"}
@@ -172,6 +181,7 @@ func (agent *agentCommandRunner) StatusWithError(commandArgs ...agentclient.Agen
func (agent *agentCommandRunner) waitForReadyTimeout(timeout time.Duration) error {
interval := 100 * time.Millisecond
maxRetries := timeout.Milliseconds() / interval.Milliseconds()
+ agent.t.Log("Waiting for the agent to be ready")
err := backoff.Retry(func() error {
_, err := agent.executor.execute([]string{"status"})
if err != nil {
diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_docker.go b/test/new-e2e/pkg/utils/e2e/client/agent_docker.go
index 43f06a638d4ca..218d001049435 100644
--- a/test/new-e2e/pkg/utils/e2e/client/agent_docker.go
+++ b/test/new-e2e/pkg/utils/e2e/client/agent_docker.go
@@ -5,6 +5,11 @@
package client
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/test-infra-definitions/components/datadog/agent"
+)
+
type agentDockerExecutor struct {
dockerClient *Docker
agentContainerName string
@@ -12,10 +17,14 @@ type agentDockerExecutor struct {
var _ agentCommandExecutor = &agentDockerExecutor{}
-func newAgentDockerExecutor(dockerClient *Docker, agentContainerName string) *agentDockerExecutor {
+func newAgentDockerExecutor(context e2e.Context, dockerAgentOutput agent.DockerAgentOutput) *agentDockerExecutor {
+ dockerClient, err := NewDocker(context.T(), dockerAgentOutput.DockerManager)
+ if err != nil {
+ panic(err)
+ }
return &agentDockerExecutor{
dockerClient: dockerClient,
- agentContainerName: agentContainerName,
+ agentContainerName: dockerAgentOutput.ContainerName,
}
}
diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_host.go b/test/new-e2e/pkg/utils/e2e/client/agent_host.go
index e666f8babbc2b..8b7dc19eacd21 100644
--- a/test/new-e2e/pkg/utils/e2e/client/agent_host.go
+++ b/test/new-e2e/pkg/utils/e2e/client/agent_host.go
@@ -9,26 +9,33 @@ import (
"fmt"
"strings"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/test-infra-definitions/components/os"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams"
+ wincommand "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/command"
)
type agentHostExecutor struct {
baseCommand string
- host *components.RemoteHost
+ host *Host
}
-func newAgentHostExecutor(host *components.RemoteHost) agentCommandExecutor {
+func newAgentHostExecutor(osFamily os.Family, host *Host, params *agentclientparams.Params) agentCommandExecutor {
var baseCommand string
- switch host.OSFamily {
+ switch osFamily {
case os.WindowsFamily:
- baseCommand = `& "$env:ProgramFiles\Datadog\Datadog Agent\bin\agent.exe"`
+ installPath := params.AgentInstallPath
+ if len(installPath) == 0 {
+ installPath = defaultWindowsAgentInstallPath(host)
+ }
+ fmt.Printf("Using default install path: %s\n", installPath)
+ baseCommand = fmt.Sprintf(`& "%s\bin\agent.exe"`, installPath)
case os.LinuxFamily:
baseCommand = "sudo datadog-agent"
case os.MacOSFamily:
baseCommand = "datadog-agent"
default:
- panic(fmt.Sprintf("unsupported OS family: %v", host.OSFamily))
+ panic(fmt.Sprintf("unsupported OS family: %v", osFamily))
}
return &agentHostExecutor{
@@ -45,3 +52,15 @@ func (ae agentHostExecutor) execute(arguments []string) (string, error) {
return ae.host.Execute(ae.baseCommand + " " + parameters)
}
+
+// defaultWindowsAgentInstallPath returns a reasonable default for the AgentInstallPath.
+//
+// If the Agent is installed, the installPath is read from the registry.
+// If the registry key is not found, returns the default install path.
+func defaultWindowsAgentInstallPath(host *Host) string {
+ path, err := host.Execute(wincommand.GetInstallPathFromRegistry())
+ if err != nil {
+ path = wincommand.DefaultInstallPath
+ }
+ return strings.TrimSpace(path)
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/agentclientparams/agent_client_params.go b/test/new-e2e/pkg/utils/e2e/client/agentclientparams/agent_client_params.go
index 50d4174901472..821ab7a18ce77 100644
--- a/test/new-e2e/pkg/utils/e2e/client/agentclientparams/agent_client_params.go
+++ b/test/new-e2e/pkg/utils/e2e/client/agentclientparams/agent_client_params.go
@@ -6,15 +6,42 @@
// Package agentclientparams implements function parameters for [e2e.Agent]
package agentclientparams
+import (
+ "fmt"
+ "time"
+
+ osComp "github.com/DataDog/test-infra-definitions/components/os"
+)
+
// Params defines the parameters for the Agent client.
// The Params configuration uses the [Functional options pattern].
//
// The available options are:
// - [WithSkipWaitForAgentReady]
+// - [WithAgentInstallPath]
+// - [WithAuthToken]
+// - [WithAuthTokenPath]
+// - [WithProcessAgentOnPort]
+// - [WithProcessAgent]
+// - [WithTraceAgentOnPort]
+// - [WithTraceAgent]
+// - [WithSecurityAgentOnPort]
+// - [WithSecurityAgent]
+// - [WithWaitForDuration]
+// - [WithWaitForTick]
//
// [Functional options pattern]: https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Params struct {
ShouldWaitForReady bool
+ AgentInstallPath string
+
+ AuthToken string
+ AuthTokenPath string
+ ProcessAgentPort int
+ TraceAgentPort int
+ SecurityAgentPort int
+ WaitForDuration time.Duration
+ WaitForTick time.Duration
}
// Option alias to a functional option changing a given Params instance
@@ -22,9 +49,12 @@ type Option func(*Params)
// NewParams creates a new instance of Agent client params
// default ShouldWaitForReady: true
-func NewParams(options ...Option) *Params {
+func NewParams(osfam osComp.Family, options ...Option) *Params {
p := &Params{
ShouldWaitForReady: true,
+ AuthTokenPath: defaultAuthTokenPath(osfam),
+ WaitForDuration: 1 * time.Minute,
+ WaitForTick: 5 * time.Second,
}
return applyOption(p, options...)
}
@@ -43,3 +73,88 @@ func WithSkipWaitForAgentReady() Option {
p.ShouldWaitForReady = false
}
}
+
+// WithAgentInstallPath sets the agent installation path
+func WithAgentInstallPath(path string) Option {
+ return func(p *Params) {
+ p.AgentInstallPath = path
+ }
+}
+
+// WithAuthToken sets the auth token.
+func WithAuthToken(authToken string) Option {
+ return func(p *Params) {
+ p.AuthToken = authToken
+ }
+}
+
+// WithAuthTokenPath sets the path to the auth token file.
+// The file is read from the remote host.
+// This is not used if the auth token is provided directly with WithAuthToken.
+func WithAuthTokenPath(path string) Option {
+ return func(p *Params) {
+ p.AuthTokenPath = path
+ }
+}
+
+// WithProcessAgentOnPort enables waiting for the Process Agent, using the given port for the API.
+func WithProcessAgentOnPort(port int) Option {
+ return func(p *Params) {
+ p.ProcessAgentPort = port
+ }
+}
+
+// WithProcessAgent enables waiting for the Process Agent, using the default API port.
+func WithProcessAgent() Option {
+ return WithProcessAgentOnPort(6162)
+}
+
+// WithTraceAgentOnPort enables waiting for the Trace Agent, using the given port for the API.
+func WithTraceAgentOnPort(port int) Option {
+ return func(p *Params) {
+ p.TraceAgentPort = port
+ }
+}
+
+// WithTraceAgent enables waiting for the Trace Agent, using the default API port.
+func WithTraceAgent() Option {
+ return WithTraceAgentOnPort(5012)
+}
+
+// WithSecurityAgentOnPort enables waiting for the Security Agent, using the given port for the API.
+func WithSecurityAgentOnPort(port int) Option {
+ return func(p *Params) {
+ p.SecurityAgentPort = port
+ }
+}
+
+// WithSecurityAgent enables waiting for the Security Agent, using the default API port.
+func WithSecurityAgent() Option {
+ return WithSecurityAgentOnPort(5010)
+}
+
+// WithWaitForDuration sets the duration to wait for the agents to be ready.
+func WithWaitForDuration(d time.Duration) Option {
+ return func(p *Params) {
+ p.WaitForDuration = d
+ }
+}
+
+// WithWaitForTick sets the duration between checks for the agents to be ready.
+func WithWaitForTick(d time.Duration) Option {
+ return func(p *Params) {
+ p.WaitForTick = d
+ }
+}
+
+func defaultAuthTokenPath(osfam osComp.Family) string {
+ switch osfam {
+ case osComp.LinuxFamily:
+ return "/etc/datadog-agent/auth_token"
+ case osComp.WindowsFamily:
+ return "C:\\ProgramData\\Datadog\\auth_token"
+ case osComp.MacOSFamily:
+ return "/opt/datadog-agent/etc/auth_token"
+ }
+ panic(fmt.Sprintf("unsupported OS family %d", osfam))
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/docker.go b/test/new-e2e/pkg/utils/e2e/client/docker.go
index 5790775600fe4..1ff037cf0fc96 100644
--- a/test/new-e2e/pkg/utils/e2e/client/docker.go
+++ b/test/new-e2e/pkg/utils/e2e/client/docker.go
@@ -9,37 +9,48 @@ import (
"bytes"
"context"
"fmt"
+ "strings"
"testing"
- "github.com/DataDog/test-infra-definitions/components/remote"
+ "github.com/DataDog/test-infra-definitions/components/docker"
"github.com/docker/cli/cli/connhelper"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/stretchr/testify/require"
+
+ "github.com/DataDog/datadog-agent/pkg/util/scrubber"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
)
// A Docker client that is connected to an [docker.Deamon].
//
// [docker.Deamon]: https://pkg.go.dev/github.com/DataDog/test-infra-definitions@main/components/datadog/agent/docker#Deamon
type Docker struct {
- t *testing.T
- client *client.Client
+ t *testing.T
+ client *client.Client
+ scrubber *scrubber.Scrubber
}
// NewDocker creates a new instance of Docker
// NOTE: docker+ssh does not support password protected SSH keys.
-func NewDocker(t *testing.T, host remote.HostOutput, privateKeyPath string) (*Docker, error) {
- deamonURL := fmt.Sprintf("ssh://%v@%v", host.Username, host.Address)
+func NewDocker(t *testing.T, dockerOutput docker.ManagerOutput) (*Docker, error) {
+ deamonURL := fmt.Sprintf("ssh://%v@%v", dockerOutput.Host.Username, dockerOutput.Host.Address)
sshOpts := []string{"-o", "StrictHostKeyChecking no"}
+
+ privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "")
+ if err != nil {
+ return nil, err
+ }
if privateKeyPath != "" {
sshOpts = append(sshOpts, "-i", privateKeyPath)
}
helper, err := connhelper.GetConnectionHelperWithSSHOpts(deamonURL, sshOpts)
if err != nil {
- return nil, fmt.Errorf("cannot connect to docker %v: %v", deamonURL, err)
+ return nil, fmt.Errorf("cannot connect to docker %v: %w", deamonURL, err)
}
opts := []client.Opt{
@@ -49,12 +60,13 @@ func NewDocker(t *testing.T, host remote.HostOutput, privateKeyPath string) (*Do
client, err := client.NewClientWithOpts(opts...)
if err != nil {
- return nil, fmt.Errorf("cannot create docker client: %v", err)
+ return nil, fmt.Errorf("cannot create docker client: %w", err)
}
return &Docker{
- t: t,
- client: client,
+ t: t,
+ client: client,
+ scrubber: scrubber.NewWithDefaults(),
}, nil
}
@@ -75,7 +87,11 @@ func (docker *Docker) ExecuteCommandWithErr(containerName string, commands ...st
}
// ExecuteCommandStdoutStdErr executes a command on containerName and returns the output, the error output and an error.
-func (docker *Docker) ExecuteCommandStdoutStdErr(containerName string, commands ...string) (string, string, error) {
+func (docker *Docker) ExecuteCommandStdoutStdErr(containerName string, commands ...string) (stdout string, stderr string, err error) {
+ cmd := strings.Join(commands, " ")
+ scrubbedCommand := docker.scrubber.ScrubLine(cmd) // scrub the command in case it contains secrets
+ docker.t.Logf("Executing command `%s`", scrubbedCommand)
+
context := context.Background()
execConfig := types.ExecConfig{Cmd: commands, AttachStderr: true, AttachStdout: true}
execCreateResp, err := docker.client.ContainerExecCreate(context, containerName, execConfig)
@@ -94,19 +110,12 @@ func (docker *Docker) ExecuteCommandStdoutStdErr(containerName string, commands
execInspectResp, err := docker.client.ContainerExecInspect(context, execCreateResp.ID)
require.NoError(docker.t, err)
- output := outBuf.String()
- errOutput := errBuf.String()
+ stdout = outBuf.String()
+ stderr = errBuf.String()
if execInspectResp.ExitCode != 0 {
- return "", "", fmt.Errorf("error when running command %v on container %v: %v %v", commands, containerName, output, errOutput)
+ return "", "", fmt.Errorf("error when running command %v on container %v:\n exit code: %d\n stdout: %v\n stderr: %v", commands, containerName, execInspectResp.ExitCode, stdout, stderr)
}
- return output, errOutput, err
-}
-
-// GetClient gets the [docker client].
-//
-// [docker client]: https://pkg.go.dev/github.com/docker/docker/client
-func (docker *Docker) GetClient() *client.Client {
- return docker.client
+ return stdout, stderr, err
}
diff --git a/test/new-e2e/pkg/utils/e2e/client/ec2_metadata.go b/test/new-e2e/pkg/utils/e2e/client/ec2_metadata.go
index d268cb552f937..e6b9e371a9b4f 100644
--- a/test/new-e2e/pkg/utils/e2e/client/ec2_metadata.go
+++ b/test/new-e2e/pkg/utils/e2e/client/ec2_metadata.go
@@ -8,48 +8,52 @@ package client
import (
"fmt"
"strings"
+ "testing"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/test-infra-definitions/components/os"
)
// EC2Metadata contains a pointer to a VM and its AWS token
type EC2Metadata struct {
- h *components.RemoteHost
- token string
+ t *testing.T
+ host *Host
+ osFamily os.Family
+ token string
}
const metadataEndPoint = "http://169.254.169.254"
// NewEC2Metadata creates a new [EC2Metadata] given an EC2 [VM]
-func NewEC2Metadata(h *components.RemoteHost) *EC2Metadata {
+func NewEC2Metadata(t *testing.T, h *Host, osFamily os.Family) *EC2Metadata {
var cmd string
- switch h.OSFamily {
+ switch osFamily {
case os.WindowsFamily:
cmd = fmt.Sprintf(`Invoke-RestMethod -Uri "%v/latest/api/token" -Method Put -Headers @{ "X-aws-ec2-metadata-token-ttl-seconds" = "21600" }`, metadataEndPoint)
case os.LinuxFamily:
cmd = fmt.Sprintf(`curl -s -X PUT "%v/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600"`, metadataEndPoint)
default:
- panic(fmt.Sprintf("unsupported OS family: %v", h.OSFamily))
+ panic(fmt.Sprintf("unsupported OS family: %v", osFamily))
}
+ t.Log("Getting EC2 metadata token")
output := h.MustExecute(cmd)
- return &EC2Metadata{h: h, token: output}
+ return &EC2Metadata{osFamily: osFamily, token: output, host: h, t: t}
}
// Get returns EC2 instance name
func (m *EC2Metadata) Get(name string) string {
var cmd string
- switch m.h.OSFamily {
+ switch m.osFamily {
case os.WindowsFamily:
cmd = fmt.Sprintf(`Invoke-RestMethod -Headers @{"X-aws-ec2-metadata-token"="%v"} -Uri "%v/latest/meta-data/%v"`, m.token, metadataEndPoint, name)
case os.LinuxFamily:
cmd = fmt.Sprintf(`curl -s -H "X-aws-ec2-metadata-token: %v" "%v/latest/meta-data/%v"`, m.token, metadataEndPoint, name)
default:
- panic(fmt.Sprintf("unsupported OS family: %v", m.h.OSFamily))
+ panic(fmt.Sprintf("unsupported OS family: %v", m.osFamily))
}
- return strings.TrimRight(m.h.MustExecute(cmd), "\r\n")
+ m.t.Log("Getting EC2 metadata for", name)
+ return strings.TrimRight(m.host.MustExecute(cmd), "\r\n")
}
diff --git a/test/new-e2e/pkg/utils/e2e/client/host.go b/test/new-e2e/pkg/utils/e2e/client/host.go
new file mode 100644
index 0000000000000..16d597805dd1a
--- /dev/null
+++ b/test/new-e2e/pkg/utils/e2e/client/host.go
@@ -0,0 +1,558 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ oscomp "github.com/DataDog/test-infra-definitions/components/os"
+ "github.com/DataDog/test-infra-definitions/components/remote"
+ "github.com/cenkalti/backoff"
+ "github.com/pkg/sftp"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/ssh"
+
+ "github.com/DataDog/datadog-agent/pkg/util/scrubber"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional"
+)
+
+const (
+ // Waiting for only 10s as we expect remote to be ready when provisioning
+ sshRetryInterval = 2 * time.Second
+ sshMaxRetries = 20
+)
+
+type buildCommandFn func(command string, envVars EnvVar) string
+
+type convertPathSeparatorFn func(string) string
+
+// A Host client that is connected to an [ssh.Client].
+type Host struct {
+ client *ssh.Client
+
+ context e2e.Context
+ username string
+ host string
+ privateKey []byte
+ privateKeyPassphrase []byte
+ buildCommand buildCommandFn
+ convertPathSeparator convertPathSeparatorFn
+ osFamily oscomp.Family
+ // as per the documentation of http.Transport: "Transports should be reused instead of created as needed."
+ httpTransport *http.Transport
+ scrubber *scrubber.Scrubber
+}
+
+// NewHost creates a new ssh client to connect to a remote host with
+// reconnect retry logic
+func NewHost(context e2e.Context, hostOutput remote.HostOutput) (*Host, error) {
+ var privateSSHKey []byte
+ privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "")
+ if err != nil {
+ return nil, err
+ }
+
+ privateKeyPassword, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.PrivateKeyPassword, "")
+ if err != nil {
+ return nil, err
+ }
+
+ if privateKeyPath != "" {
+ privateSSHKey, err = os.ReadFile(privateKeyPath)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ host := &Host{
+ context: context,
+ username: hostOutput.Username,
+ host: fmt.Sprintf("%s:%d", hostOutput.Address, 22),
+ privateKey: privateSSHKey,
+ privateKeyPassphrase: []byte(privateKeyPassword),
+ buildCommand: buildCommandFactory(hostOutput.OSFamily),
+ convertPathSeparator: convertPathSeparatorFactory(hostOutput.OSFamily),
+ osFamily: hostOutput.OSFamily,
+ scrubber: scrubber.NewWithDefaults(),
+ }
+
+ host.httpTransport = host.newHTTPTransport()
+
+ err = host.Reconnect()
+ return host, err
+}
+
+// Reconnect closes the current ssh client and creates a new one, with retries.
+func (h *Host) Reconnect() error {
+ h.context.T().Log("Reconnecting to host")
+ if h.client != nil {
+ _ = h.client.Close()
+ }
+ return backoff.Retry(func() error {
+ client, err := getSSHClient(h.username, h.host, h.privateKey, h.privateKeyPassphrase)
+ if err != nil {
+ return err
+ }
+ h.client = client
+ return nil
+ }, backoff.WithMaxRetries(backoff.NewConstantBackOff(sshRetryInterval), sshMaxRetries))
+}
+
+// Execute executes a command and returns an error if any.
+func (h *Host) Execute(command string, options ...ExecuteOption) (string, error) {
+ params, err := optional.MakeParams(options...)
+ if err != nil {
+ return "", err
+ }
+ command = h.buildCommand(command, params.EnvVariables)
+ return h.executeAndReconnectOnError(command)
+}
+
+func (h *Host) executeAndReconnectOnError(command string) (string, error) {
+ scrubbedCommand := h.scrubber.ScrubLine(command) // scrub the command in case it contains secrets
+ h.context.T().Logf("%s - %s - Executing command `%s`", time.Now().Format("02-01-2006 15:04:05"), h.context.T().Name(), scrubbedCommand)
+ stdout, err := execute(h.client, command)
+ if err != nil && strings.Contains(err.Error(), "failed to create session:") {
+ err = h.Reconnect()
+ if err != nil {
+ return "", err
+ }
+ stdout, err = execute(h.client, command)
+ }
+ if err != nil {
+ return "", fmt.Errorf("%v: %w", stdout, err)
+ }
+ return stdout, err
+}
+
+// MustExecute executes a command and requires no error.
+func (h *Host) MustExecute(command string, options ...ExecuteOption) string {
+ stdout, err := h.Execute(command, options...)
+ require.NoError(h.context.T(), err)
+ return stdout
+}
+
+// CopyFileFromFS creates a sftp session and copy a single embedded file to the remote host through SSH
+func (h *Host) CopyFileFromFS(fs fs.FS, src, dst string) {
+ h.context.T().Logf("Copying file from local %s to remote %s", src, dst)
+ dst = h.convertPathSeparator(dst)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+ file, err := fs.Open(src)
+ require.NoError(h.context.T(), err)
+ defer file.Close()
+ err = copyFileFromIoReader(sftpClient, file, dst)
+ require.NoError(h.context.T(), err)
+}
+
+// CopyFile creates a sftp session and copy a single file to the remote host through SSH
+func (h *Host) CopyFile(src string, dst string) {
+ h.context.T().Logf("Copying file from local %s to remote %s", src, dst)
+ dst = h.convertPathSeparator(dst)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+ err := copyFile(sftpClient, src, dst)
+ require.NoError(h.context.T(), err)
+}
+
+// CopyFolder create a sftp session and copy a folder to remote host through SSH
+func (h *Host) CopyFolder(srcFolder string, dstFolder string) error {
+ h.context.T().Logf("Copying folder from local %s to remote %s", srcFolder, dstFolder)
+ dstFolder = h.convertPathSeparator(dstFolder)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+ return copyFolder(sftpClient, srcFolder, dstFolder)
+}
+
+// FileExists create a sftp session to and returns true if the file exists and is a regular file
+func (h *Host) FileExists(path string) (bool, error) {
+ h.context.T().Logf("Checking if file exists: %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ info, err := sftpClient.Lstat(path)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ return info.Mode().IsRegular(), nil
+}
+
+// GetFile create a sftp session and copy a single file from the remote host through SSH
+func (h *Host) GetFile(src string, dst string) error {
+ h.context.T().Logf("Copying file from remote %s to local %s", src, dst)
+ dst = h.convertPathSeparator(dst)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ // remote
+ fsrc, err := sftpClient.Open(src)
+ if err != nil {
+ return err
+ }
+ defer fsrc.Close()
+
+ // local
+ fdst, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer fdst.Close()
+
+ _, err = fsrc.WriteTo(fdst)
+ return err
+}
+
+// ReadFile reads the content of the file, return bytes read and error if any
+func (h *Host) ReadFile(path string) ([]byte, error) {
+ h.context.T().Logf("Reading file at %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ f, err := sftpClient.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var content bytes.Buffer
+ _, err = io.Copy(&content, f)
+ if err != nil {
+ return content.Bytes(), err
+ }
+
+ return content.Bytes(), nil
+}
+
+// WriteFile write content to the file and returns the number of bytes written and error if any
+func (h *Host) WriteFile(path string, content []byte) (int64, error) {
+ h.context.T().Logf("Writing to file at %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ f, err := sftpClient.Create(path)
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ reader := bytes.NewReader(content)
+ return io.Copy(f, reader)
+}
+
+// AppendFile append content to the file and returns the number of bytes appened and error if any
+func (h *Host) AppendFile(os, path string, content []byte) (int64, error) {
+ h.context.T().Logf("Appending to file at %s", path)
+ path = h.convertPathSeparator(path)
+ if os == "linux" {
+ return h.appendWithSudo(path, content)
+ }
+ return h.appendWithSftp(path, content)
+}
+
+// ReadDir returns list of directory entries in path
+func (h *Host) ReadDir(path string) ([]fs.DirEntry, error) {
+ h.context.T().Logf("Reading filesystem at %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+
+ defer sftpClient.Close()
+
+ infos, err := sftpClient.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ entries := make([]fs.DirEntry, 0, len(infos))
+ for _, info := range infos {
+ entry := fs.FileInfoToDirEntry(info)
+ entries = append(entries, entry)
+ }
+
+ return entries, nil
+}
+
+// Lstat returns a FileInfo structure describing path.
+// if path is a symbolic link, the FileInfo structure describes the symbolic link.
+func (h *Host) Lstat(path string) (fs.FileInfo, error) {
+ h.context.T().Logf("Reading file info of %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ return sftpClient.Lstat(path)
+}
+
+// MkdirAll creates the specified directory along with any necessary parents.
+// If the path is already a directory, does nothing and returns nil.
+// Otherwise returns an error if any.
+func (h *Host) MkdirAll(path string) error {
+ h.context.T().Logf("Creating directory %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ return sftpClient.MkdirAll(path)
+}
+
+// Remove removes the specified file or directory.
+// Returns an error if file or directory does not exist, or if the directory is not empty.
+func (h *Host) Remove(path string) error {
+ h.context.T().Logf("Removing %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ return sftpClient.Remove(path)
+}
+
+// RemoveAll recursively removes all files/folders in the specified directory.
+// Returns an error if the directory does not exist.
+func (h *Host) RemoveAll(path string) error {
+ h.context.T().Logf("Removing all under %s", path)
+ path = h.convertPathSeparator(path)
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ return sftpClient.RemoveAll(path)
+}
+
+// DialPort creates a connection from the remote host to its `port`.
+func (h *Host) DialPort(port uint16) (net.Conn, error) {
+ h.context.T().Logf("Creating connection to host port %d", port)
+ address := fmt.Sprintf("127.0.0.1:%d", port)
+ protocol := "tcp"
+ // TODO add context to host
+ context := context.Background()
+ connection, err := h.client.DialContext(context, protocol, address)
+ if err != nil {
+ err = h.Reconnect()
+ if err != nil {
+ return nil, err
+ }
+ connection, err = h.client.DialContext(context, protocol, address)
+ }
+ return connection, err
+}
+
+// GetTmpFolder returns the temporary folder path for the host
+func (h *Host) GetTmpFolder() (string, error) {
+ switch osFamily := h.osFamily; osFamily {
+ case oscomp.WindowsFamily:
+ return h.Execute("echo %TEMP%")
+ case oscomp.LinuxFamily:
+ return "/tmp", nil
+ default:
+ return "", errors.ErrUnsupported
+ }
+}
+
+// GetLogsFolder returns the logs folder path for the host
+func (h *Host) GetLogsFolder() (string, error) {
+ switch osFamily := h.osFamily; osFamily {
+ case oscomp.WindowsFamily:
+ return `C:\ProgramData\Datadog\logs`, nil
+ case oscomp.LinuxFamily:
+ return "/var/log/datadog/", nil
+ case oscomp.MacOSFamily:
+ return "/opt/datadog-agent/logs", nil
+ default:
+ return "", errors.ErrUnsupported
+ }
+}
+
+// appendWithSudo appends content to the file using sudo tee for Linux environment
+func (h *Host) appendWithSudo(path string, content []byte) (int64, error) {
+ cmd := fmt.Sprintf("echo '%s' | sudo tee -a %s", string(content), path)
+ output, err := h.Execute(cmd)
+ if err != nil {
+ return 0, err
+ }
+ return int64(len(output)), nil
+}
+
+// appendWithSftp appends content to the file using sftp for Windows environment
+func (h *Host) appendWithSftp(path string, content []byte) (int64, error) {
+ sftpClient := h.getSFTPClient()
+ defer sftpClient.Close()
+
+ // Open the file in append mode and create it if it doesn't exist
+ f, err := sftpClient.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY)
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ reader := bytes.NewReader(content)
+ written, err := io.Copy(f, reader)
+ if err != nil {
+ return 0, err
+ }
+
+ return written, nil
+}
+
+func (h *Host) getSFTPClient() *sftp.Client {
+ sftpClient, err := sftp.NewClient(h.client, sftp.UseConcurrentWrites(true))
+ if err != nil {
+ err = h.Reconnect()
+ require.NoError(h.context.T(), err)
+ sftpClient, err = sftp.NewClient(h.client, sftp.UseConcurrentWrites(true))
+ require.NoError(h.context.T(), err)
+ }
+ return sftpClient
+}
+
+// HTTPTransport returns an http.RoundTripper which dials the remote host.
+// This transport can only reach the host.
+func (h *Host) HTTPTransport() http.RoundTripper {
+ return h.httpTransport
+}
+
+// NewHTTPClient returns an *http.Client which dials the remote host.
+// This client can only reach the host.
+func (h *Host) NewHTTPClient() *http.Client {
+ return &http.Client{
+ Transport: h.httpTransport,
+ }
+}
+
+func (h *Host) newHTTPTransport() *http.Transport {
+ return &http.Transport{
+ DialContext: func(_ context.Context, _, addr string) (net.Conn, error) {
+ hostname, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ // best effort to detect logic errors around the hostname
+ // if the hostname provided to dial is not one of those, return an error as
+ // it's likely an incorrect use of this transport
+ validHostnames := map[string]struct{}{
+ "": {},
+ "localhost": {},
+ "127.0.0.1": {},
+ h.client.RemoteAddr().String(): {},
+ }
+
+ if _, ok := validHostnames[hostname]; !ok {
+ return nil, fmt.Errorf("request hostname %s does not match any valid host name", hostname)
+ }
+
+ portInt, err := strconv.Atoi(port)
+ if err != nil {
+ return nil, err
+ }
+ return h.DialPort(uint16(portInt))
+ },
+ // skip verify like we do when reaching out to the agent
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ // from http.DefaultTransport
+ Proxy: http.ProxyFromEnvironment,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
+
+func buildCommandFactory(osFamily oscomp.Family) buildCommandFn {
+ if osFamily == oscomp.WindowsFamily {
+ return buildCommandOnWindows
+ }
+ return buildCommandOnLinuxAndMacOS
+}
+
+func buildCommandOnWindows(command string, envVar EnvVar) string {
+ cmd := ""
+
+ // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an error instead
+ // of the default 'Continue' behavior.
+ // This also ensures that Execute() will return an error when a command fails.
+ // Note that this only applies to PowerShell commands, not to external commands or native binaries.
+ //
+ // For example, if the command is (Get-Service -Name ddnpm).Status and the service does not exist,
+ // then by default the command will print an error but the exit code will be 0 and Execute() will not return an error.
+ // By setting $ErrorActionPreference to 'Stop', Execute() will return an error as one would expect.
+ //
+ // Thus, we default to 'Stop' to make sure that an error is raised when the command fails instead of failing silently.
+ // Commands that this causes issues for will be immediately noticed and can be adjusted as needed, instead of
+ // silent errors going unnoticed and affecting test results.
+ //
+ // To ignore errors, prefix command with $ErrorActionPreference='Continue' or use -ErrorAction Continue
+ // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_preference_variables#erroractionpreference
+ cmd += "$ErrorActionPreference='Stop'; "
+
+ for envName, envValue := range envVar {
+ cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envValue)
+ }
+ // By default, powershell will just exit with 0 or 1, so we call exit to preserve
+ // the exit code of the command provided by the caller.
+ // The caller's command may not modify LASTEXITCODE, so manually reset it first,
+ // then only call exit if the command provided by the caller fails.
+ //
+ // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_automatic_variables?#lastexitcode
+ // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_powershell_exe?#-command
+ cmd += fmt.Sprintf("$LASTEXITCODE=0; %s; if (-not $?) { exit $LASTEXITCODE }", command)
+ // NOTE: Do not add more commands after the command provided by the caller.
+ //
+ // `$ErrorActionPreference`='Stop' only applies to PowerShell commands, not to
+ // external commands or native binaries, thus later commands will still be executed.
+ // Additional commands will overwrite the exit code of the command provided by
+ // the caller which may cause errors to be missed/ignored.
+ // If it becomes necessary to run more commands after the command provided by the
+ // caller, we will need to find a way to ensure that the exit code of the command
+ // provided by the caller is preserved.
+
+ return cmd
+}
+
+func buildCommandOnLinuxAndMacOS(command string, envVar EnvVar) string {
+ cmd := ""
+ for envName, envValue := range envVar {
+ cmd += fmt.Sprintf("%s='%s' ", envName, envValue)
+ }
+ cmd += command
+ return cmd
+}
+
+// convertToForwardSlashOnWindows replaces backslashes in the path with forward slashes for Windows remote hosts.
+// The path is unchanged for non-Windows remote hosts.
+//
+// This is necessary for remote paths because the sftp package only supports forward slashes, regardless of the local OS.
+// The Windows SSH implementation does this conversion, too. Though we have an advantage in that we can check the OSFamily.
+// https://github.com/PowerShell/openssh-portable/blob/59aba65cf2e2f423c09d12ad825c3b32a11f408f/scp.c#L636-L650
+func convertPathSeparatorFactory(osFamily oscomp.Family) convertPathSeparatorFn {
+ if osFamily == oscomp.WindowsFamily {
+ return func(s string) string {
+ return strings.ReplaceAll(s, "\\", "/")
+ }
+ }
+ return func(s string) string {
+ return s
+ }
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/host_params.go b/test/new-e2e/pkg/utils/e2e/client/host_params.go
new file mode 100644
index 0000000000000..93172fc53c1cf
--- /dev/null
+++ b/test/new-e2e/pkg/utils/e2e/client/host_params.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+import (
+ "fmt"
+ "regexp"
+)
+
+// EnvVar alias to map representing env variables
+type EnvVar map[string]string
+
+var envVarNameRegexp = regexp.MustCompile("^[a-zA-Z_]+[a-zA-Z0-9_]*$")
+
+// ExecuteParams contains parameters for VM.Execute commands
+type ExecuteParams struct {
+ EnvVariables EnvVar
+}
+
+// ExecuteOption alias to a functional option changing a given Params instance
+type ExecuteOption func(*ExecuteParams) error
+
+// WithEnvVariables allows to set env variable for the command that will be executed
+func WithEnvVariables(env EnvVar) ExecuteOption {
+ return func(p *ExecuteParams) error {
+ p.EnvVariables = make(EnvVar, len(env))
+ for envName, envVar := range env {
+ if match := envVarNameRegexp.MatchString(envName); match {
+ p.EnvVariables[envName] = envVar
+ } else {
+ return fmt.Errorf("variable name %s does not have a valid format", envName)
+ }
+ }
+ return nil
+ }
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/host_ssh.go b/test/new-e2e/pkg/utils/e2e/client/host_ssh.go
new file mode 100644
index 0000000000000..79a53dd4b87e2
--- /dev/null
+++ b/test/new-e2e/pkg/utils/e2e/client/host_ssh.go
@@ -0,0 +1,137 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+import (
+ "fmt"
+ "github.com/pkg/sftp"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+ "io"
+ "net"
+ "os"
+ "path"
+ "strings"
+)
+
+func execute(sshClient *ssh.Client, command string) (string, error) {
+ session, err := sshClient.NewSession()
+ if err != nil {
+ return "", fmt.Errorf("failed to create session: %v", err)
+ }
+ stdout, err := session.CombinedOutput(command)
+ return string(stdout), err
+}
+
+func getSSHClient(user, host string, privateKey, privateKeyPassphrase []byte) (*ssh.Client, error) {
+ var auth ssh.AuthMethod
+
+ if len(privateKey) > 0 {
+ var privateKeyAuth ssh.Signer
+ var err error
+
+ if len(privateKeyPassphrase) > 0 {
+ privateKeyAuth, err = ssh.ParsePrivateKeyWithPassphrase(privateKey, privateKeyPassphrase)
+ } else {
+ privateKeyAuth, err = ssh.ParsePrivateKey(privateKey)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ auth = ssh.PublicKeys(privateKeyAuth)
+ } else {
+ // Use the ssh agent
+ conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
+ if err != nil {
+ return nil, fmt.Errorf("no ssh key provided and cannot connect to the ssh agent: %v", err)
+ }
+ defer conn.Close()
+ sshAgent := agent.NewClient(conn)
+ auth = ssh.PublicKeysCallback(sshAgent.Signers)
+ }
+
+ sshConfig := &ssh.ClientConfig{
+ User: user,
+ Auth: []ssh.AuthMethod{auth},
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+ }
+
+ client, err := ssh.Dial("tcp", host, sshConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ session, err := client.NewSession()
+ if err != nil {
+ client.Close()
+ return nil, err
+ }
+ err = session.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+func copyFileFromIoReader(sftpClient *sftp.Client, srcFile io.Reader, dst string) error {
+ lastSlashIdx := strings.LastIndex(dst, "/")
+ if lastSlashIdx >= 0 {
+ // Ensure the target directory exists
+ // otherwise sftpClient.Create will return an error
+ err := sftpClient.MkdirAll(dst[:lastSlashIdx])
+ if err != nil {
+ return err
+ }
+ }
+
+ dstFile, err := sftpClient.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstFile.Close()
+
+ if _, err := dstFile.ReadFrom(srcFile); err != nil {
+ return err
+ }
+ return nil
+}
+
+func copyFile(sftpClient *sftp.Client, src string, dst string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcFile.Close()
+ return copyFileFromIoReader(sftpClient, srcFile, dst)
+}
+
+func copyFolder(sftpClient *sftp.Client, srcFolder string, dstFolder string) error {
+ folderContent, err := os.ReadDir(srcFolder)
+ if err != nil {
+ return err
+ }
+
+ if err := sftpClient.MkdirAll(dstFolder); err != nil {
+ return err
+ }
+
+ for _, d := range folderContent {
+ if !d.IsDir() {
+ err := copyFile(sftpClient, path.Join(srcFolder, d.Name()), path.Join(dstFolder, d.Name()))
+ if err != nil {
+ return err
+ }
+ } else {
+ err = copyFolder(sftpClient, path.Join(srcFolder, d.Name()), path.Join(dstFolder, d.Name()))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/test/new-e2e/pkg/utils/e2e/client/k8s.go b/test/new-e2e/pkg/utils/e2e/client/k8s.go
new file mode 100644
index 0000000000000..4ec9456349962
--- /dev/null
+++ b/test/new-e2e/pkg/utils/e2e/client/k8s.go
@@ -0,0 +1,72 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package client
+
+import (
+ "context"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ kubeClient "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+// KubernetesClient is a wrapper around the k8s client library and provides convenience methods for interacting with a
+// k8s cluster
+type KubernetesClient struct {
+ K8sConfig *rest.Config
+ K8sClient kubeClient.Interface
+}
+
+// NewKubernetesClient creates a new KubernetesClient
+func NewKubernetesClient(config *rest.Config) (*KubernetesClient, error) {
+ // Create client
+ k8sClient, err := kubeClient.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+
+ return &KubernetesClient{
+ K8sConfig: config,
+ K8sClient: k8sClient,
+ }, nil
+}
+
+// PodExec execs into a given namespace/pod and returns the output for the given command
+func (k *KubernetesClient) PodExec(namespace, pod, container string, cmd []string) (stdout, stderr string, err error) {
+ req := k.K8sClient.CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(pod).SubResource("exec")
+ option := &corev1.PodExecOptions{
+ Stdin: false,
+ Stdout: true,
+ Stderr: true,
+ TTY: false,
+ Container: container,
+ Command: cmd,
+ }
+
+ req.VersionedParams(
+ option,
+ scheme.ParameterCodec,
+ )
+
+ exec, err := remotecommand.NewSPDYExecutor(k.K8sConfig, "POST", req.URL())
+ if err != nil {
+ return "", "", err
+ }
+
+ var stdoutSb, stderrSb strings.Builder
+ err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
+ Stdout: &stdoutSb,
+ Stderr: &stderrSb,
+ })
+ if err != nil {
+ return "", "", err
+ }
+
+ return stdoutSb.String(), stderrSb.String(), nil
+}
diff --git a/test/new-e2e/pkg/utils/infra/datadog_event_sender.go b/test/new-e2e/pkg/utils/infra/datadog_event_sender.go
new file mode 100644
index 0000000000000..1c921a64019e0
--- /dev/null
+++ b/test/new-e2e/pkg/utils/infra/datadog_event_sender.go
@@ -0,0 +1,95 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package infra implements utilities to interact with a Pulumi infrastructure
+package infra
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
+)
+
+type datadogEventSender interface {
+ SendEvent(body datadogV1.EventCreateRequest)
+}
+
+type datadogEventSenderImpl struct {
+ ctx context.Context
+ eventsAPI *datadogV1.EventsApi
+
+ logger io.Writer
+
+ initOnce sync.Once
+ isReady bool
+}
+
+var _ datadogEventSender = &datadogEventSenderImpl{}
+
+func newDatadogEventSender(logger io.Writer) *datadogEventSenderImpl {
+ return &datadogEventSenderImpl{
+ logger: logger,
+ initOnce: sync.Once{},
+ isReady: false,
+ }
+}
+
+func (d *datadogEventSenderImpl) initDatadogEventSender() error {
+ apiKey, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.APIKey, "")
+ if err != nil {
+ fmt.Fprintf(d.logger, "error when getting API key from parameter store: %v", err)
+ return err
+ }
+
+ if apiKey == "" {
+ fmt.Fprintf(d.logger, "Skipping sending event because API key is empty")
+ return errors.New("empty API key")
+ }
+
+ d.ctx = context.WithValue(context.Background(), datadog.ContextAPIKeys, map[string]datadog.APIKey{
+ "apiKeyAuth": {
+ Key: apiKey,
+ },
+ })
+
+ configuration := datadog.NewConfiguration()
+ apiClient := datadog.NewAPIClient(configuration)
+ eventsAPI := datadogV1.NewEventsApi(apiClient)
+
+ d.eventsAPI = eventsAPI
+
+ d.isReady = true
+
+ return nil
+}
+
+func (d *datadogEventSenderImpl) SendEvent(body datadogV1.EventCreateRequest) {
+ d.initOnce.Do(func() {
+ err := d.initDatadogEventSender()
+ if err != nil {
+ fmt.Fprintf(d.logger, "error when initializing `datadogEventSender`: %v", err)
+ d.isReady = false
+ }
+ })
+
+ if !d.isReady {
+ return
+ }
+
+ _, response, err := d.eventsAPI.CreateEvent(d.ctx, body)
+
+ if err != nil {
+ fmt.Fprintf(d.logger, "error when calling `EventsApi.CreateEvent`: %v", err)
+ fmt.Fprintf(d.logger, "Full HTTP response: %v\n", response)
+ return
+ }
+}
diff --git a/test/new-e2e/pkg/utils/infra/retriable_errors.go b/test/new-e2e/pkg/utils/infra/retriable_errors.go
new file mode 100644
index 0000000000000..b940cbcd2e064
--- /dev/null
+++ b/test/new-e2e/pkg/utils/infra/retriable_errors.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package infra implements utilities to interact with a Pulumi infrastructure
+package infra
+
+// RetryType is an enum to specify the type of retry to perform
+type RetryType string
+
+const (
+ ReUp RetryType = "ReUp" // ReUp retries the up operation
+ ReCreate RetryType = "ReCreate" // ReCreate retries the up operation after destroying the stack
+ NoRetry RetryType = "NoRetry" // NoRetry does not retry the up operation
+)
+
+type knownError struct {
+ errorMessage string
+ retryType RetryType
+}
+
+func getKnownErrors() []knownError {
+ // Add here errors that are known to be flakes and that should be retried
+ return []knownError{
+ {
+ errorMessage: `i\/o timeout`,
+ retryType: ReCreate,
+ },
+ {
+ // https://datadoghq.atlassian.net/browse/ADXT-1
+ errorMessage: `failed attempts: dial tcp :22: connect: connection refused`,
+ retryType: ReCreate,
+ },
+ {
+ // https://datadoghq.atlassian.net/browse/ADXT-295
+ errorMessage: `Resource provider reported that the resource did not exist while updating`,
+ retryType: ReCreate,
+ },
+ {
+ // https://datadoghq.atlassian.net/browse/ADXT-558
+ // https://datadoghq.atlassian.net/browse/ADXT-713
+ errorMessage: `Process exited with status \d+: running " sudo cloud-init status --wait"`,
+ retryType: ReCreate,
+ },
+ {
+ errorMessage: `waiting for ECS Service .+fakeintake-ecs.+ create: timeout while waiting for state to become 'tfSTABLE'`,
+ retryType: ReCreate,
+ },
+ {
+ errorMessage: `error while waiting for fakeintake`,
+ retryType: ReCreate,
+ },
+ {
+ errorMessage: `ssh: handshake failed: ssh: unable to authenticate`,
+ retryType: ReCreate,
+ },
+ }
+}
diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go
index e7d9ddd7fecb8..15d9e44c2139b 100644
--- a/test/new-e2e/pkg/utils/infra/stack_manager.go
+++ b/test/new-e2e/pkg/utils/infra/stack_manager.go
@@ -12,14 +12,14 @@ import (
"fmt"
"io"
"os"
+ "regexp"
"runtime"
"strings"
"sync"
"time"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common"
+
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
"github.com/pulumi/pulumi/sdk/v3/go/auto"
"github.com/pulumi/pulumi/sdk/v3/go/auto/debug"
@@ -29,16 +29,23 @@ import (
"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters"
)
const (
nameSep = "-"
e2eWorkspaceDirectory = "dd-e2e-workspace"
- stackUpTimeout = 60 * time.Minute
- stackDestroyTimeout = 60 * time.Minute
- stackDeleteTimeout = 20 * time.Minute
- stackUpRetry = 2
+ defaultStackUpTimeout time.Duration = 60 * time.Minute
+ defaultStackCancelTimeout time.Duration = 10 * time.Minute
+ defaultStackDestroyTimeout time.Duration = 60 * time.Minute
+ defaultStackForceRemoveTimeout time.Duration = 20 * time.Minute
+ defaultStackRemoveTimeout time.Duration = 10 * time.Minute
+ stackUpMaxRetry = 2
+ stackDestroyMaxRetry = 2
+ stackRemoveMaxRetry = 2
)
var (
@@ -50,9 +57,16 @@ var (
initStackManager sync.Once
)
+// RetryStrategyFromFn is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration
+type RetryStrategyFromFn func(error, int) (RetryType, []GetStackOption)
+
// StackManager handles
type StackManager struct {
- stacks *safeStackMap
+ stacks *safeStackMap
+ knownErrors []knownError
+
+ // GetRetryStrategyFrom defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden
+ GetRetryStrategyFrom RetryStrategyFromFn
}
type safeStackMap struct {
@@ -103,16 +117,32 @@ func GetStackManager() *StackManager {
}
func newStackManager() (*StackManager, error) {
- return &StackManager{
- stacks: newSafeStackMap(),
- }, nil
+ sm := &StackManager{
+ stacks: newSafeStackMap(),
+ knownErrors: getKnownErrors(),
+ }
+ sm.GetRetryStrategyFrom = sm.getRetryStrategyFrom
+
+ return sm, nil
}
// GetStack creates or return a stack based on stack name and config, if error occurs during stack creation it destroy all the resources created
-func (sm *StackManager) GetStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool) (*auto.Stack, auto.UpResult, error) {
- stack, upResult, err := sm.getStack(ctx, name, config, deployFunc, failOnMissing, nil)
+func (sm *StackManager) GetStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool) (_ *auto.Stack, _ auto.UpResult, err error) {
+ defer func() {
+ if err != nil {
+ err = common.InternalError{Err: err}
+ }
+ }()
+
+ stack, upResult, err := sm.getStack(
+ ctx,
+ name,
+ deployFunc,
+ WithConfigMap(config),
+ WithFailOnMissing(failOnMissing),
+ )
if err != nil {
- errDestroy := sm.deleteStack(ctx, name, stack, nil)
+ errDestroy := sm.destroyAndRemoveStack(ctx, name, stack, nil, nil)
if errDestroy != nil {
return stack, upResult, errors.Join(err, errDestroy)
}
@@ -121,19 +151,93 @@ func (sm *StackManager) GetStack(ctx context.Context, name string, config runner
return stack, upResult, err
}
+type getStackParams struct {
+ Config runner.ConfigMap
+ FailOnMissing bool
+ LogWriter io.Writer
+ DatadogEventSender datadogEventSender
+ UpTimeout time.Duration
+ DestroyTimeout time.Duration
+ CancelTimeout time.Duration
+}
+
+// GetStackOption is a function that sets a parameter for GetStack function
+type GetStackOption func(*getStackParams)
+
+// WithConfigMap sets the configuration map for the stack
+func WithConfigMap(config runner.ConfigMap) GetStackOption {
+ return func(p *getStackParams) {
+ p.Config = config
+ }
+}
+
+// WithFailOnMissing sets the failOnMissing flag for the stack
+func WithFailOnMissing(failOnMissing bool) GetStackOption {
+ return func(p *getStackParams) {
+ p.FailOnMissing = failOnMissing
+ }
+}
+
+// WithLogWriter sets the log writer for the stack
+func WithLogWriter(logWriter io.Writer) GetStackOption {
+ return func(p *getStackParams) {
+ p.LogWriter = logWriter
+ }
+}
+
+// WithDatadogEventSender sets the datadog event sender for the stack
+func WithDatadogEventSender(datadogEventSender datadogEventSender) GetStackOption {
+ return func(p *getStackParams) {
+ p.DatadogEventSender = datadogEventSender
+ }
+}
+
+// WithUpTimeout sets the up timeout for the stack
+func WithUpTimeout(upTimeout time.Duration) GetStackOption {
+ return func(p *getStackParams) {
+ p.UpTimeout = upTimeout
+ }
+}
+
+// WithDestroyTimeout sets the destroy timeout for the stack
+func WithDestroyTimeout(destroyTimeout time.Duration) GetStackOption {
+ return func(p *getStackParams) {
+ p.DestroyTimeout = destroyTimeout
+ }
+}
+
+// WithCancelTimeout sets the cancel timeout for the stack
+func WithCancelTimeout(cancelTimeout time.Duration) GetStackOption {
+ return func(p *getStackParams) {
+ p.CancelTimeout = cancelTimeout
+ }
+}
+
// GetStackNoDeleteOnFailure creates or return a stack based on stack name and config, if error occurs during stack creation, it will not destroy the created resources. Using this can lead to resource leaks.
-func (sm *StackManager) GetStackNoDeleteOnFailure(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool, logWriter io.Writer) (*auto.Stack, auto.UpResult, error) {
- return sm.getStack(ctx, name, config, deployFunc, failOnMissing, logWriter)
+func (sm *StackManager) GetStackNoDeleteOnFailure(ctx context.Context, name string, deployFunc pulumi.RunFunc, options ...GetStackOption) (_ *auto.Stack, _ auto.UpResult, err error) {
+ defer func() {
+ if err != nil {
+ err = common.InternalError{Err: err}
+ }
+ }()
+
+ return sm.getStack(ctx, name, deployFunc, options...)
}
// DeleteStack safely deletes a stack
-func (sm *StackManager) DeleteStack(ctx context.Context, name string, logWriter io.Writer) error {
+func (sm *StackManager) DeleteStack(ctx context.Context, name string, logWriter io.Writer) (err error) {
+ defer func() {
+ if err != nil {
+ err = common.InternalError{Err: err}
+ }
+ }()
+
stack, ok := sm.stacks.Get(name)
if !ok {
// Build configuration from profile
profile := runner.GetProfile()
stackName := buildStackName(profile.NamePrefix(), name)
- workspace, err := buildWorkspace(ctx, profile, stackName, func(ctx *pulumi.Context) error { return nil })
+ workspace, err := buildWorkspace(ctx, profile, stackName, func(*pulumi.Context) error { return nil })
if err != nil {
return err
}
@@ -146,18 +250,24 @@ func (sm *StackManager) DeleteStack(ctx context.Context, name string, logWriter
stack = &newStack
}
- return sm.deleteStack(ctx, name, stack, logWriter)
+ return sm.destroyAndRemoveStack(ctx, name, stack, logWriter, nil)
}
// ForceRemoveStackConfiguration removes the configuration files pulumi creates for managing a stack.
// It DOES NOT perform any cleanup of the resources created by the stack. Call `DeleteStack` for correct cleanup.
-func (sm *StackManager) ForceRemoveStackConfiguration(ctx context.Context, name string) error {
+func (sm *StackManager) ForceRemoveStackConfiguration(ctx context.Context, name string) (err error) {
+ defer func() {
+ if err != nil {
+ err = common.InternalError{Err: err}
+ }
+ }()
+
stack, ok := sm.stacks.Get(name)
if !ok {
return fmt.Errorf("unable to remove stack %s: stack not present", name)
}
- deleteContext, cancel := context.WithTimeout(ctx, stackDeleteTimeout)
+ deleteContext, cancel := context.WithTimeout(ctx, defaultStackForceRemoveTimeout)
defer cancel()
return stack.Workspace().RemoveStack(deleteContext, stack.Name(), optremove.Force())
}
@@ -167,9 +277,9 @@ func (sm *StackManager) Cleanup(ctx context.Context) []error {
var errors []error
sm.stacks.Range(func(stackID string, stack *auto.Stack) {
- err := sm.deleteStack(ctx, stackID, stack, nil)
+ err := sm.destroyAndRemoveStack(ctx, stackID, stack, nil, nil)
if err != nil {
- errors = append(errors, err)
+ errors = append(errors, common.InternalError{Err: err})
}
})
@@ -194,17 +304,32 @@ func (sm *StackManager) getLoggingOptions() (debug.LoggingOptions, error) {
}, nil
}
-func (sm *StackManager) deleteStack(ctx context.Context, stackID string, stack *auto.Stack, logWriter io.Writer) error {
- if stack == nil {
- return fmt.Errorf("unable to find stack, skipping deletion of: %s", stackID)
+func (sm *StackManager) getProgressStreamsOnUp(logger io.Writer) optup.Option {
+ progressStreams, err := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.PulumiVerboseProgressStreams, false)
+ if err != nil {
+ return optup.ErrorProgressStreams(logger)
}
- destroyContext, cancel := context.WithTimeout(ctx, stackDestroyTimeout)
+ if progressStreams {
+ return optup.ProgressStreams(logger)
+ }
- loggingOptions, err := sm.getLoggingOptions()
+ return optup.ErrorProgressStreams(logger)
+}
+
+func (sm *StackManager) getProgressStreamsOnDestroy(logger io.Writer) optdestroy.Option {
+ progressStreams, err := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.PulumiVerboseProgressStreams, false)
if err != nil {
- return err
+ return optdestroy.ErrorProgressStreams(logger)
}
+
+ if progressStreams {
+ return optdestroy.ProgressStreams(logger)
+ }
+ return optdestroy.ErrorProgressStreams(logger)
+}
+
+func (sm *StackManager) destroyAndRemoveStack(ctx context.Context, stackID string, stack *auto.Stack, logWriter io.Writer, ddEventSender datadogEventSender) error {
var logger io.Writer
if logWriter == nil {
@@ -212,26 +337,125 @@ func (sm *StackManager) deleteStack(ctx context.Context, stackID string, stack *
} else {
logger = logWriter
}
- _, err = stack.Destroy(destroyContext, optdestroy.ProgressStreams(logger), optdestroy.DebugLogging(loggingOptions))
- cancel()
+ //initialize datadog event sender
+ if ddEventSender == nil {
+ ddEventSender = newDatadogEventSender(logger)
+ }
+
+ err := sm.destroyStack(ctx, stackID, stack, logger, ddEventSender)
if err != nil {
return err
}
- deleteContext, cancel := context.WithTimeout(ctx, stackDeleteTimeout)
- defer cancel()
- err = stack.Workspace().RemoveStack(deleteContext, stack.Name())
- return err
+ err = sm.removeStack(ctx, stackID, stack, logger, ddEventSender)
+ if err != nil {
+ // Failing removing the stack is not a critical error, the resources are already destroyed
+ // Print the error and return nil
+ fmt.Printf("Error during stack remove: %v\n", err)
+ }
+ return nil
+}
+
+func (sm *StackManager) destroyStack(ctx context.Context, stackID string, stack *auto.Stack, logger io.Writer, ddEventSender datadogEventSender) error {
+ if stack == nil {
+ return fmt.Errorf("unable to find stack, skipping destruction of: %s", stackID)
+ }
+ if logger == nil {
+ return fmt.Errorf("unable to find logger, skipping destruction of: %s", stackID)
+ }
+
+ loggingOptions, err := sm.getLoggingOptions()
+ if err != nil {
+ return err
+ }
+
+ progressStreamsDestroyOption := sm.getProgressStreamsOnDestroy(logger)
+
+ downCount := 0
+ var destroyErr error
+ for {
+ downCount++
+ destroyContext, cancel := context.WithTimeout(ctx, defaultStackDestroyTimeout)
+ _, destroyErr = stack.Destroy(destroyContext, progressStreamsDestroyOption, optdestroy.DebugLogging(loggingOptions))
+ cancel()
+ if destroyErr == nil {
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack destroy", stackID), "", []string{"operation:destroy", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", downCount)})
+ return nil
+ }
+
+ // handle timeout
+ contextCauseErr := context.Cause(destroyContext)
+ if errors.Is(contextCauseErr, context.DeadlineExceeded) {
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack destroy", stackID), "", []string{"operation:destroy", fmt.Sprintf("stack:%s", stack.Name())})
+ fmt.Fprint(logger, "Timeout during stack destroy, trying to cancel stack's operation\n")
+ err := cancelStack(stack, defaultStackCancelTimeout)
+ if err != nil {
+ fmt.Fprintf(logger, "Giving up on error during attempt to cancel stack operation: %v\n", err)
+ return err
+ }
+ }
+
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack destroy", stackID), destroyErr.Error(), []string{"operation:destroy", "result:fail", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", downCount)})
+
+ if downCount > stackDestroyMaxRetry {
+ fmt.Fprintf(logger, "Giving up on error during stack destroy: %v\n", destroyErr)
+ return destroyErr
+ }
+ fmt.Fprintf(logger, "Retrying stack on error during stack destroy: %v\n", destroyErr)
+ }
+}
+
+func (sm *StackManager) removeStack(ctx context.Context, stackID string, stack *auto.Stack, logger io.Writer, ddEventSender datadogEventSender) error {
+ if stack == nil {
+ return fmt.Errorf("unable to find stack, skipping removal of: %s", stackID)
+ }
+ if logger == nil {
+ return fmt.Errorf("unable to find logger, skipping removal of: %s", stackID)
+ }
+
+ removeCount := 0
+ var err error
+ for {
+ removeCount++
+ removeContext, cancel := context.WithTimeout(ctx, defaultStackRemoveTimeout)
+ err = stack.Workspace().RemoveStack(removeContext, stack.Name())
+ cancel()
+ if err == nil {
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack remove", stackID), "", []string{"operation:remove", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", removeCount)})
+ return nil
+ }
+
+ // handle timeout
+ contextCauseErr := context.Cause(removeContext)
+ if errors.Is(contextCauseErr, context.DeadlineExceeded) {
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack remove", stackID), "", []string{"operation:remove", fmt.Sprintf("stack:%s", stack.Name())})
+ fmt.Fprint(logger, "Timeout during stack remove\n")
+ continue
+ }
+
+ sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack remove", stackID), err.Error(), []string{"operation:remove", "result:fail", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", removeCount)})
+
+ if removeCount > stackRemoveMaxRetry {
+ fmt.Fprintf(logger, "[WARNING] Giving up on error during stack remove: %v\nThe stack resources are destroyed, but we failed removing the stack state.\n", err)
+ return err
+ }
+ fmt.Printf("Retrying removing stack, error: %v\n", err)
+ }
}
-func (sm *StackManager) getStack(ctx context.Context, name string, config runner.ConfigMap, deployFunc pulumi.RunFunc, failOnMissing bool, logWriter io.Writer) (*auto.Stack, auto.UpResult, error) {
+func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pulumi.RunFunc, options ...GetStackOption) (*auto.Stack, auto.UpResult, error) {
+ params := getDefaultGetStackParams()
+ for _, opt := range options {
+ opt(¶ms)
+ }
+
// Build configuration from profile
profile := runner.GetProfile()
stackName := buildStackName(profile.NamePrefix(), name)
deployFunc = runFuncWithRecover(deployFunc)
// Inject common/managed parameters
- cm, err := runner.BuildStackParameters(profile, config)
+ cm, err := runner.BuildStackParameters(profile, params.Config)
if err != nil {
return nil, auto.UpResult{}, err
}
@@ -243,7 +467,7 @@ func (sm *StackManager) getStack(ctx context.Context, name string, config runner
}
newStack, err := auto.SelectStack(ctx, stackName, workspace)
- if auto.IsSelectStack404Error(err) && !failOnMissing {
+ if auto.IsSelectStack404Error(err) && !params.FailOnMissing {
newStack, err = auto.NewStack(ctx, stackName, workspace)
}
if err != nil {
@@ -265,46 +489,80 @@ func (sm *StackManager) getStack(ctx context.Context, name string, config runner
if err != nil {
return nil, auto.UpResult{}, err
}
- var logger io.Writer
+ var logger = params.LogWriter
- if logWriter == nil {
- logger = os.Stderr
- } else {
- logger = logWriter
- }
+ progressStreamsUpOption := sm.getProgressStreamsOnUp(logger)
+ progressStreamsDestroyOption := sm.getProgressStreamsOnDestroy(logger)
var upResult auto.UpResult
-
- for retry := 0; retry < stackUpRetry; retry++ {
- upCtx, cancel := context.WithTimeout(ctx, stackUpTimeout)
- upResult, err = stack.Up(upCtx, optup.ProgressStreams(logger), optup.DebugLogging(loggingOptions))
+ var upError error
+ upCount := 0
+
+ for {
+ upCount++
+ upCtx, cancel := context.WithTimeout(ctx, params.UpTimeout)
+ now := time.Now()
+ upResult, upError = stack.Up(upCtx, progressStreamsUpOption, optup.DebugLogging(loggingOptions))
+ fmt.Fprintf(logger, "Stack up took %v\n", time.Since(now))
cancel()
- if err == nil {
+ // early return on success
+ if upError == nil {
+ sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", name), "", []string{"operation:up", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)})
break
}
- if retryStrategy := shouldRetryError(err); retryStrategy != noRetry {
- fmt.Fprintf(logger, "Got error that should be retried during stack up, retrying with %s strategy", retryStrategy)
- err := sendEventToDatadog(fmt.Sprintf("[E2E] Stack %s : retrying Pulumi stack up", name), err.Error(), []string{"operation:up", fmt.Sprintf("retry:%s", retryStrategy)}, logger)
+
+ // handle timeout
+ contextCauseErr := context.Cause(upCtx)
+ if errors.Is(contextCauseErr, context.DeadlineExceeded) {
+ sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack up", name), "", []string{"operation:up", fmt.Sprintf("stack:%s", stack.Name())})
+ fmt.Fprint(logger, "Timeout during stack up, trying to cancel stack's operation\n")
+ err = cancelStack(stack, params.CancelTimeout)
if err != nil {
- fmt.Fprintf(logger, "Got error when sending event to Datadog: %v", err)
+ fmt.Fprintf(logger, "Giving up on error during attempt to cancel stack operation: %v\n", err)
+ return stack, upResult, err
}
+ }
- if retryStrategy == reCreate {
- // If we are recreating the stack, we should destroy the stack first
- destroyCtx, cancel := context.WithTimeout(ctx, stackDestroyTimeout)
- _, err := stack.Destroy(destroyCtx, optdestroy.ProgressStreams(logger), optdestroy.DebugLogging(loggingOptions))
- cancel()
- if err != nil {
- return stack, auto.UpResult{}, err
- }
+ retryStrategy, changedOpts := sm.GetRetryStrategyFrom(upError, upCount)
+ sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)})
+
+ switch retryStrategy {
+ case ReUp:
+ fmt.Fprintf(logger, "Retrying stack on error during stack up: %v\n", upError)
+ case ReCreate:
+ fmt.Fprintf(logger, "Recreating stack on error during stack up: %v\n", upError)
+ destroyCtx, cancel := context.WithTimeout(ctx, params.DestroyTimeout)
+ _, err = stack.Destroy(destroyCtx, progressStreamsDestroyOption, optdestroy.DebugLogging(loggingOptions))
+ cancel()
+ if err != nil {
+ fmt.Fprintf(logger, "Error during stack destroy at recrate stack attempt: %v\n", err)
+ return stack, auto.UpResult{}, err
}
+ case NoRetry:
+ fmt.Fprintf(logger, "Giving up on error during stack up: %v\n", upError)
+ return stack, upResult, upError
+ }
- } else {
- break
+ if len(changedOpts) > 0 {
+ // apply changed options from retry strategy
+ for _, opt := range changedOpts {
+ opt(¶ms)
+ }
+
+ cm, err = runner.BuildStackParameters(profile, params.Config)
+ if err != nil {
+ return nil, auto.UpResult{}, fmt.Errorf("error trying to build new stack options on retry: %s", err)
+ }
+
+ err = stack.SetAllConfig(ctx, cm.ToPulumi())
+ if err != nil {
+ return nil, auto.UpResult{}, fmt.Errorf("error trying to change stack options on retry: %s", err)
+ }
}
}
- return stack, upResult, err
+
+ return stack, upResult, upError
}
func buildWorkspace(ctx context.Context, profile runner.Profile, stackName string, runFunc pulumi.RunFunc) (auto.Workspace, error) {
@@ -327,7 +585,7 @@ func buildWorkspace(ctx context.Context, profile runner.Profile, stackName strin
return nil, fmt.Errorf("unable to create temporary folder at: %s, err: %w", workspaceStackDir, err)
}
- fmt.Printf("Creating workspace for stack: %s at %s", stackName, workspaceStackDir)
+ fmt.Printf("Creating workspace for stack: %s at %s\n", stackName, workspaceStackDir)
return auto.NewLocalWorkspace(ctx,
auto.Project(project),
auto.Program(runFunc),
@@ -355,76 +613,45 @@ func runFuncWithRecover(f pulumi.RunFunc) pulumi.RunFunc {
}
}
-type retryType string
-
-const (
- reUp retryType = "ReUp" // Retry the up operation
- reCreate retryType = "ReCreate" // Retry the up operation after destroying the stack
- noRetry retryType = "NoRetry"
-)
-
-func shouldRetryError(err error) retryType {
- // Add here errors that are known to be flakes and that should be retried
- if strings.Contains(err.Error(), "i/o timeout") {
- return reCreate
+func (sm *StackManager) getRetryStrategyFrom(err error, upCount int) (RetryType, []GetStackOption) {
+ // if first attempt + retries count are higher than max retry, give up
+ if upCount > stackUpMaxRetry {
+ return NoRetry, nil
}
- if strings.Contains(err.Error(), "creating EC2 Instance: IdempotentParameterMismatch:") {
- return reUp
- }
-
- if strings.Contains(err.Error(), "InvalidInstanceID.NotFound") {
- return reUp
- }
-
- if strings.Contains(err.Error(), "create: timeout while waiting for state to become 'tfSTABLE'") {
- return reUp
+ for _, knownError := range sm.knownErrors {
+ isMatch, err := regexp.MatchString(knownError.errorMessage, err.Error())
+ if err != nil {
+ fmt.Printf("Error matching regex %s: %v\n", knownError.errorMessage, err)
+ }
+ if isMatch {
+ return knownError.retryType, nil
+ }
}
- return noRetry
+ return ReUp, nil
}
// sendEventToDatadog sends an event to Datadog, it will use the API Key from environment variable DD_API_KEY if present, otherwise it will use the one from SSM Parameter Store
-func sendEventToDatadog(title string, message string, tags []string, logger io.Writer) error {
- apiKey, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.APIKey, "")
- if err != nil {
- fmt.Fprintf(logger, "error when getting API key from parameter store: %v", err)
- return err
- }
-
- if apiKey == "" {
- fmt.Fprintf(logger, "Skipping sending event because API key is empty")
- return nil
- }
-
- ctx := context.WithValue(context.Background(), datadog.ContextAPIKeys, map[string]datadog.APIKey{
- "apiKeyAuth": {
- Key: apiKey,
- },
- })
-
- configuration := datadog.NewConfiguration()
- apiClient := datadog.NewAPIClient(configuration)
- api := datadogV1.NewEventsApi(apiClient)
-
- _, r, err := api.CreateEvent(ctx, datadogV1.EventCreateRequest{
+func sendEventToDatadog(sender datadogEventSender, title string, message string, tags []string) {
+ sender.SendEvent(datadogV1.EventCreateRequest{
Title: title,
Text: message,
Tags: append([]string{"repository:datadog/datadog-agent", "test:new-e2e", "source:pulumi"}, tags...),
})
- if err != nil {
- fmt.Fprintf(logger, "error when calling `EventsApi.CreateEvent`: %v", err)
- fmt.Fprintf(logger, "Full HTTP response: %v\n", r)
- return err
- }
- return nil
}
// GetPulumiStackName returns the Pulumi stack name
// The internal Pulumi stack name should normally remain hidden as all the Pulumi interactions
// should be done via the StackManager.
// The only use case for getting the internal Pulumi stack name is to interact directly with Pulumi for debug purposes.
-func (sm *StackManager) GetPulumiStackName(name string) (string, error) {
+func (sm *StackManager) GetPulumiStackName(name string) (_ string, err error) {
+ defer func() {
+ if err != nil {
+ err = common.InternalError{Err: err}
+ }
+ }()
+
stack, ok := sm.stacks.Get(name)
if !ok {
return "", fmt.Errorf("stack %s not present", name)
@@ -432,3 +659,37 @@ func (sm *StackManager) GetPulumiStackName(name string) (string, error) {
return stack.Name(), nil
}
+
+func cancelStack(stack *auto.Stack, cancelTimeout time.Duration) error {
+ if cancelTimeout.Nanoseconds() == 0 {
+ cancelTimeout = defaultStackCancelTimeout
+ }
+ cancelCtx, cancel := context.WithTimeout(context.Background(), cancelTimeout)
+ err := stack.Cancel(cancelCtx)
+ cancel()
+
+ if err == nil {
+ return nil
+ }
+
+ // handle timeout
+ ctxCauseErr := context.Cause(cancelCtx)
+ if errors.Is(ctxCauseErr, context.DeadlineExceeded) {
+ return fmt.Errorf("timeout during stack cancel: %w", ctxCauseErr)
+ }
+
+ return err
+}
+
+func getDefaultGetStackParams() getStackParams {
+ var defaultLogger io.Writer = os.Stderr
+ return getStackParams{
+ Config: nil,
+ UpTimeout: defaultStackUpTimeout,
+ DestroyTimeout: defaultStackDestroyTimeout,
+ CancelTimeout: defaultStackCancelTimeout,
+ LogWriter: defaultLogger,
+ DatadogEventSender: newDatadogEventSender(defaultLogger),
+ FailOnMissing: false,
+ }
+}
diff --git a/test/new-e2e/pkg/utils/infra/stack_manager_test.go b/test/new-e2e/pkg/utils/infra/stack_manager_test.go
new file mode 100644
index 0000000000000..3dd9fa81c5380
--- /dev/null
+++ b/test/new-e2e/pkg/utils/infra/stack_manager_test.go
@@ -0,0 +1,289 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package infra implements utilities to interact with a Pulumi infrastructure
+package infra
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
+ "github.com/pulumi/pulumi/sdk/v3/go/auto"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mockWriter struct {
+ logs []string
+}
+
+var _ io.Writer = &mockWriter{}
+
+func (m *mockWriter) Write(p []byte) (n int, err error) {
+ m.logs = append(m.logs, string(p))
+ return 0, nil
+}
+
+type mockDatadogEventSender struct {
+ events []datadogV1.EventCreateRequest
+}
+
+var _ datadogEventSender = &mockDatadogEventSender{}
+
+func (m *mockDatadogEventSender) SendEvent(body datadogV1.EventCreateRequest) {
+ m.events = append(m.events, body)
+}
+
+func TestStackManager(t *testing.T) {
+ stackManager := GetStackManager()
+ ctx := context.Background()
+
+ t.Run("should-succeed-on-successful-run-function", func(t *testing.T) {
+ t.Parallel()
+ t.Log("Should succeed on successful run function")
+ mockWriter := &mockWriter{
+ logs: []string{},
+ }
+ mockDatadogEventSender := &mockDatadogEventSender{
+ events: []datadogV1.EventCreateRequest{},
+ }
+ stackName := "test-successful"
+ stack, result, err := stackManager.GetStackNoDeleteOnFailure(
+ ctx,
+ stackName,
+ func(*pulumi.Context) error {
+ return nil
+ },
+ WithLogWriter(mockWriter),
+ WithDatadogEventSender(mockDatadogEventSender),
+ )
+ require.NoError(t, err)
+ require.NotNil(t, stack)
+ defer func() {
+ err := stackManager.DeleteStack(ctx, stackName, mockWriter)
+ require.NoError(t, err)
+ }()
+ require.NotNil(t, result)
+ retryOnErrorLogs := filterRetryOnErrorLogs(mockWriter.logs)
+ assert.Empty(t, retryOnErrorLogs)
+ assert.Len(t, mockDatadogEventSender.events, 1)
+ assert.Contains(t, mockDatadogEventSender.events[0].Title, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", stackName))
+ })
+
+ t.Run("should-retry-and-succeed", func(t *testing.T) {
+ for errCount := 0; errCount < stackUpMaxRetry; errCount++ {
+ errCount := errCount
+ t.Run(fmt.Sprintf("should-retry-and-succeed-%d", errCount), func(t *testing.T) {
+ t.Parallel()
+ t.Log("Should retry on failing run function and eventually succeed")
+ mockWriter := &mockWriter{
+ logs: []string{},
+ }
+ mockDatadogEventSender := &mockDatadogEventSender{
+ events: []datadogV1.EventCreateRequest{},
+ }
+ stackUpCounter := 0
+ stackName := fmt.Sprintf("test-retry-%d", errCount)
+ stack, result, err := stackManager.GetStackNoDeleteOnFailure(
+ ctx,
+ stackName,
+ func(*pulumi.Context) error {
+ stackUpCounter++
+ if stackUpCounter > errCount {
+ return nil
+ }
+ return fmt.Errorf("error %d", stackUpCounter)
+ },
+ WithLogWriter(mockWriter),
+ WithDatadogEventSender(mockDatadogEventSender),
+ )
+ require.NoError(t, err)
+ require.NotNil(t, stack)
+ defer func() {
+ err := stackManager.DeleteStack(ctx, stackName, mockWriter)
+ require.NoError(t, err)
+ }()
+ require.NotNil(t, result)
+ retryOnErrorLogs := filterRetryOnErrorLogs(mockWriter.logs)
+ assert.Len(t, retryOnErrorLogs, errCount, fmt.Sprintf("should have %d error logs", errCount))
+ for i := 0; i < errCount; i++ {
+ assert.Contains(t, retryOnErrorLogs[i], "Retrying stack on error during stack up")
+ assert.Contains(t, retryOnErrorLogs[i], fmt.Sprintf("error %d", i+1))
+ }
+ assert.Len(t, mockDatadogEventSender.events, errCount+1)
+ for i := 0; i < errCount; i++ {
+ assert.Contains(t, mockDatadogEventSender.events[i].Title, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", stackName))
+ }
+ assert.Contains(t, mockDatadogEventSender.events[len(mockDatadogEventSender.events)-1].Title, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", stackName))
+ })
+ }
+ })
+
+ t.Run("should-eventually-fail", func(t *testing.T) {
+ t.Parallel()
+ t.Log("Should retry on failing run function and eventually fail")
+ mockWriter := &mockWriter{
+ logs: []string{},
+ }
+ mockDatadogEventSender := &mockDatadogEventSender{
+ events: []datadogV1.EventCreateRequest{},
+ }
+ stackUpCounter := 0
+ stackName := "test-retry-failure"
+ stack, result, err := stackManager.GetStackNoDeleteOnFailure(
+ ctx,
+ stackName,
+ func(*pulumi.Context) error {
+ stackUpCounter++
+ return fmt.Errorf("error %d", stackUpCounter)
+ },
+ WithLogWriter(mockWriter),
+ WithDatadogEventSender(mockDatadogEventSender),
+ )
+ assert.Error(t, err)
+ assert.ErrorIs(t, err, common.InternalError{}, "should be an internal error")
+ require.NotNil(t, stack)
+ defer func() {
+ err := stackManager.DeleteStack(ctx, stackName, mockWriter)
+ require.NoError(t, err)
+ }()
+ assert.Equal(t, auto.UpResult{}, result)
+
+ retryOnErrorLogs := filterRetryOnErrorLogs(mockWriter.logs)
+ assert.Len(t, retryOnErrorLogs, stackUpMaxRetry, fmt.Sprintf("should have %d logs", stackUpMaxRetry+1))
+ for i := 0; i < stackUpMaxRetry; i++ {
+ assert.Contains(t, retryOnErrorLogs[i], "Retrying stack on error during stack up")
+ assert.Contains(t, retryOnErrorLogs[i], fmt.Sprintf("error %d", i+1))
+ }
+ assert.Len(t, mockDatadogEventSender.events, stackUpMaxRetry+1, fmt.Sprintf("should have %d events", stackUpMaxRetry+1))
+ for i := 0; i < stackUpMaxRetry+1; i++ {
+ assert.Contains(t, mockDatadogEventSender.events[i].Title, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", stackName))
+ }
+ assert.Contains(t, mockDatadogEventSender.events[len(mockDatadogEventSender.events)-1].Tags, "retry:NoRetry")
+ })
+
+ t.Run("should-cancel-and-retry-on-timeout", func(t *testing.T) {
+ t.Parallel()
+
+ mockWriter := &mockWriter{
+ logs: []string{},
+ }
+ mockDatadogEventSender := &mockDatadogEventSender{
+ events: []datadogV1.EventCreateRequest{},
+ }
+ stackUpCounter := 0
+ stackName := "test-cancel-retry-timeout"
+ // override stackUpTimeout to 10s
+ // average up time with an dummy run function is 5s
+ stackUpTimeout := 10 * time.Second
+ stack, result, err := stackManager.GetStackNoDeleteOnFailure(
+ ctx,
+ stackName,
+ func(*pulumi.Context) error {
+ if stackUpCounter == 0 {
+ // sleep only first time to ensure context is cancelled
+ // on timeout
+ t.Logf("Sleeping for %f", 2*stackUpTimeout.Seconds())
+ time.Sleep(2 * stackUpTimeout)
+ }
+ stackUpCounter++
+ return nil
+ },
+ WithLogWriter(mockWriter),
+ WithDatadogEventSender(mockDatadogEventSender),
+ WithUpTimeout(stackUpTimeout),
+ )
+
+ assert.NoError(t, err)
+ require.NotNil(t, stack)
+ assert.NotNil(t, result)
+ defer func() {
+ err := stackManager.DeleteStack(ctx, stackName, mockWriter)
+ require.NoError(t, err)
+ }()
+ // filter timeout logs
+ timeoutLogs := []string{}
+ for _, log := range mockWriter.logs {
+ if strings.Contains(log, "Timeout during stack up, trying to cancel stack's operation") {
+ timeoutLogs = append(timeoutLogs, log)
+ }
+ }
+ assert.Len(t, timeoutLogs, 1)
+ retryOnErrorLogs := filterRetryOnErrorLogs(mockWriter.logs)
+ assert.Len(t, retryOnErrorLogs, 1)
+ assert.Len(t, mockDatadogEventSender.events, 3)
+ assert.Contains(t, mockDatadogEventSender.events[0].Title, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack up", stackName))
+ assert.Contains(t, mockDatadogEventSender.events[1].Title, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", stackName))
+ assert.Contains(t, mockDatadogEventSender.events[2].Title, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", stackName))
+ })
+
+ t.Run("should-return-retry-strategy-on-retriable-errors", func(t *testing.T) {
+ t.Parallel()
+
+ type testError struct {
+ name string
+ errMessage string
+ expectedRetryType RetryType
+ }
+
+ testErrors := []testError{
+ {
+ name: "timeout",
+ errMessage: "i/o timeout",
+ expectedRetryType: ReCreate,
+ },
+ {
+ name: "connection-refused",
+ errMessage: "failed attempts: dial tcp :22: connect: connection refused",
+ expectedRetryType: ReCreate,
+ },
+ {
+ name: "resource-not-exist",
+ errMessage: "Resource provider reported that the resource did not exist while updating",
+ expectedRetryType: ReCreate,
+ },
+ {
+ name: "cloud-init-timeout",
+ errMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"",
+ expectedRetryType: ReCreate,
+ },
+ {
+ name: "cloud-init-timeout",
+ errMessage: "Process exited with status 6: running \" sudo cloud-init status --wait\"",
+ expectedRetryType: ReCreate,
+ },
+ {
+ name: "ecs-fakeintake-timeout",
+ errMessage: "waiting for ECS Service (arn:aws:ecs:us-east-1:669783387624:service/fakeintake-ecs/ci-633219896-4670-e2e-dockersuite-80f62edf7bcc6194-aws-fakeintake-dockervm-srv) create: timeout while waiting for state to become 'tfSTABLE' (last state: 'tfPENDING', timeout: 20m0s)",
+ expectedRetryType: ReCreate,
+ },
+ }
+
+ for _, te := range testErrors {
+ err := errors.New(te.errMessage)
+ retryType, _ := stackManager.getRetryStrategyFrom(err, 0)
+ assert.Equal(t, te.expectedRetryType, retryType, te.name)
+ }
+ })
+}
+
+func filterRetryOnErrorLogs(logs []string) []string {
+ retryOnErrorLogs := []string{}
+ for _, log := range logs {
+ if strings.Contains(log, "Retrying stack on error during stack up") {
+ retryOnErrorLogs = append(retryOnErrorLogs, log)
+ }
+ }
+ return retryOnErrorLogs
+}
diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go
index e026e93746f87..e7f0ee28fdcf6 100644
--- a/test/new-e2e/system-probe/system-probe-test-env.go
+++ b/test/new-e2e/system-probe/system-probe-test-env.go
@@ -252,12 +252,18 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (*
config["ddinfra:aws/defaultSubnets"] = auto.ConfigValue{Value: az}
}
- pulumiStack, upResult, err = stackManager.GetStackNoDeleteOnFailure(systemProbeTestEnv.context, systemProbeTestEnv.name, config, func(ctx *pulumi.Context) error {
- if err := microvms.Run(ctx); err != nil {
- return fmt.Errorf("setup micro-vms in remote instance: %w", err)
- }
- return nil
- }, opts.FailOnMissing, nil)
+ pulumiStack, upResult, err = stackManager.GetStackNoDeleteOnFailure(
+ systemProbeTestEnv.context,
+ systemProbeTestEnv.name,
+ func(ctx *pulumi.Context) error {
+ if err := microvms.Run(ctx); err != nil {
+ return fmt.Errorf("setup micro-vms in remote instance: %w", err)
+ }
+ return nil
+ },
+ infra.WithFailOnMissing(opts.FailOnMissing),
+ infra.WithConfigMap(config),
+ )
if err != nil {
return handleScenarioFailure(err, func(possibleError handledError) {
// handle the following errors by trying in a different availability zone
diff --git a/test/new-e2e/test-infra-definition/vm_test.go b/test/new-e2e/test-infra-definition/vm_test.go
index 63665cb59121f..a396b65a7eb25 100644
--- a/test/new-e2e/test-infra-definition/vm_test.go
+++ b/test/new-e2e/test-infra-definition/vm_test.go
@@ -82,7 +82,7 @@ func TestVMSuite(t *testing.T) {
func (v *vmSuiteWithAMI) TestWithImageName() {
vm := v.Env().RemoteHost
- metadata := client.NewEC2Metadata(vm)
+ metadata := client.NewEC2Metadata(v.T(), vm.Host, vm.OSFamily)
require.Equal(v.T(), requestedAmi, metadata.Get("ami-id"))
require.Equal(v.T(), "aarch64\n", vm.MustExecute("uname -m"))
require.Contains(v.T(), vm.MustExecute("grep PRETTY_NAME /etc/os-release"), "Amazon Linux")
@@ -90,7 +90,7 @@ func (v *vmSuiteWithAMI) TestWithImageName() {
func (v *vmSuiteWithInstanceType) TestWithInstanceType() {
vm := v.Env().RemoteHost
- metadata := client.NewEC2Metadata(vm)
+ metadata := client.NewEC2Metadata(v.T(), vm.Host, vm.OSFamily)
require.Equal(v.T(), metadata.Get("instance-type"), instanceType)
}
diff --git a/test/new-e2e/tests/agent-platform/common/test_client.go b/test/new-e2e/tests/agent-platform/common/test_client.go
index 9d88025196e23..d9d9b6479434d 100644
--- a/test/new-e2e/tests/agent-platform/common/test_client.go
+++ b/test/new-e2e/tests/agent-platform/common/test_client.go
@@ -12,6 +12,7 @@ import (
"time"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient"
boundport "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/bound-port"
@@ -191,10 +192,11 @@ func (c *TestClient) ExecuteWithRetry(cmd string) (string, error) {
}
// NewWindowsTestClient create a TestClient for Windows VM
-func NewWindowsTestClient(t *testing.T, host *components.RemoteHost) *TestClient {
+func NewWindowsTestClient(context e2e.Context, host *components.RemoteHost) *TestClient {
fileManager := filemanager.NewRemoteHost(host)
+ t := context.T()
- agentClient, err := client.NewHostAgentClient(t, host, false)
+ agentClient, err := client.NewHostAgentClient(context, host.HostOutput, false)
require.NoError(t, err)
helper := helpers.NewWindowsHelper()
diff --git a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go
index 84743e2e0fc66..89fc4a9ba9b38 100644
--- a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go
+++ b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go
@@ -114,7 +114,7 @@ func (is *installScriptSuite) testUninstall(client *common.TestClient, flavor st
func (is *installScriptSuite) AgentTest(flavor string) {
host := is.Env().RemoteHost
fileManager := filemanager.NewUnix(host)
- agentClient, err := client.NewHostAgentClient(is.T(), host, false)
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
unixHelper := helpers.NewUnix()
@@ -146,7 +146,7 @@ func (is *installScriptSuite) AgentTest(flavor string) {
func (is *installScriptSuite) IotAgentTest() {
host := is.Env().RemoteHost
fileManager := filemanager.NewUnix(host)
- agentClient, err := client.NewHostAgentClient(is.T(), host, false)
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
unixHelper := helpers.NewUnix()
@@ -167,7 +167,7 @@ func (is *installScriptSuite) IotAgentTest() {
func (is *installScriptSuite) DogstatsdAgentTest() {
host := is.Env().RemoteHost
fileManager := filemanager.NewUnix(host)
- agentClient, err := client.NewHostAgentClient(is.T(), host, false)
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
unixHelper := helpers.NewUnixDogstatsd()
diff --git a/test/new-e2e/tests/agent-platform/rpm/rpm_test.go b/test/new-e2e/tests/agent-platform/rpm/rpm_test.go
index ae245bc21f61e..7d4a9736ccdc3 100644
--- a/test/new-e2e/tests/agent-platform/rpm/rpm_test.go
+++ b/test/new-e2e/tests/agent-platform/rpm/rpm_test.go
@@ -95,9 +95,10 @@ func TestRpmScript(t *testing.T) {
}
func (is *rpmTestSuite) TestRpm() {
+ host := is.Env().RemoteHost
filemanager := filemanager.NewUnix(is.Env().RemoteHost)
unixHelper := helpers.NewUnix()
- agentClient, err := client.NewHostAgentClient(is.T(), is.Env().RemoteHost, false)
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
VMclient := common.NewTestClient(is.Env().RemoteHost, agentClient, filemanager, unixHelper)
diff --git a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go
index c36e237779cd1..8f866e3fcf6bf 100644
--- a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go
+++ b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go
@@ -116,9 +116,10 @@ func TestStepByStepScript(t *testing.T) {
}
func (is *stepByStepSuite) TestStepByStep() {
+ host := is.Env().RemoteHost
fileManager := filemanager.NewUnix(is.Env().RemoteHost)
unixHelper := helpers.NewUnix()
- agentClient, err := client.NewHostAgentClient(is.T(), is.Env().RemoteHost, false)
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
VMclient := common.NewTestClient(is.Env().RemoteHost, agentClient, fileManager, unixHelper)
diff --git a/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go b/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go
index 8c10bfba422ac..bfd7bb9306579 100644
--- a/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go
+++ b/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go
@@ -84,8 +84,8 @@ func TestUpgradeScript(t *testing.T) {
func (is *upgradeSuite) TestUpgrade() {
fileManager := filemanager.NewUnix(is.Env().RemoteHost)
-
- agentClient, err := client.NewHostAgentClient(is.T(), is.Env().RemoteHost, false)
+ host := is.Env().RemoteHost
+ agentClient, err := client.NewHostAgentClient(is, host.HostOutput, false)
require.NoError(is.T(), err)
unixHelper := helpers.NewUnix()
diff --git a/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go b/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go
index 50022acdb92fc..373bd61ce39af 100644
--- a/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go
+++ b/test/new-e2e/tests/agent-shared-components/forwarder/nss_failover_test.go
@@ -44,7 +44,7 @@ type multiFakeIntakeEnv struct {
func (e *multiFakeIntakeEnv) Init(ctx e2e.Context) error {
if e.Agent != nil {
- agent, err := client.NewHostAgentClient(ctx.T(), e.Host, true)
+ agent, err := client.NewHostAgentClient(ctx, e.Host.HostOutput, true)
if err != nil {
return err
}
@@ -95,7 +95,7 @@ func multiFakeIntakeAWS(agentOptions ...agentparams.Option) e2e.Provisioner {
}
host.Export(ctx, &env.Host.HostOutput)
- agent, err := agent.NewHostAgent(awsEnv.CommonEnvironment, host, agentOptions...)
+ agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...)
if err != nil {
return err
}
diff --git a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_nix_test.go b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_nix_test.go
index 26191579da22f..1e8510c6566d0 100644
--- a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_nix_test.go
+++ b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_nix_test.go
@@ -37,7 +37,7 @@ func (v *linuxHostnameSuite) TestAgentConfigHostnameFileOverride() {
func (v *linuxHostnameSuite) TestAgentConfigPreferImdsv2() {
v.UpdateEnv(awshost.ProvisionerNoFakeIntake(v.GetOs(), awshost.WithAgentOptions(agentparams.WithAgentConfig("ec2_prefer_imdsv2: true"))))
// e2e metadata provider already uses IMDSv2
- metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
hostname := v.Env().Agent.Client.Hostname()
resourceID := metadata.Get("instance-id")
@@ -48,7 +48,7 @@ func (v *linuxHostnameSuite) TestAgentConfigPreferImdsv2() {
func (v *linuxHostnameSuite) TestAgentHostnameDefaultsToResourceId() {
v.UpdateEnv(awshost.ProvisionerNoFakeIntake(v.GetOs(), awshost.WithAgentOptions(agentparams.WithAgentConfig(""))))
- metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
hostname := v.Env().Agent.Client.Hostname()
// Default configuration of hostname for EC2 instances is the resource-id
diff --git a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go
index 20047da866ee9..4187305c7a8d1 100644
--- a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go
+++ b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go
@@ -42,7 +42,7 @@ ec2_use_windows_prefix_detection: true`
v.UpdateEnv(awshost.ProvisionerNoFakeIntake(v.GetOs(), awshost.WithAgentOptions(agentparams.WithAgentConfig(config))))
// e2e metadata provider already uses IMDSv2
- metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
hostname := v.Env().Agent.Client.Hostname()
resourceID := metadata.Get("instance-id")
diff --git a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go
index 36328a578d4d9..463a75acac032 100644
--- a/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go
+++ b/test/new-e2e/tests/agent-subcommands/status/status_nix_test.go
@@ -24,7 +24,7 @@ func TestLinuxStatusSuite(t *testing.T) {
}
func (v *linuxStatusSuite) TestStatusHostname() {
- metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
resourceID := metadata.Get("instance-id")
status := v.Env().Agent.Client.Status()
diff --git a/test/new-e2e/tests/agent-subcommands/status/status_win_test.go b/test/new-e2e/tests/agent-subcommands/status/status_win_test.go
index 41a2d450f2185..e5b31a6229456 100644
--- a/test/new-e2e/tests/agent-subcommands/status/status_win_test.go
+++ b/test/new-e2e/tests/agent-subcommands/status/status_win_test.go
@@ -25,7 +25,7 @@ func TestWindowsStatusSuite(t *testing.T) {
}
func (v *windowsStatusSuite) TestStatusHostname() {
- metadata := client.NewEC2Metadata(v.Env().RemoteHost)
+ metadata := client.NewEC2Metadata(v.T(), v.Env().RemoteHost.Host, v.Env().RemoteHost.OSFamily)
resourceID := metadata.Get("instance-id")
status := v.Env().Agent.Client.Status()
diff --git a/test/new-e2e/tests/apm/vm_test.go b/test/new-e2e/tests/apm/vm_test.go
index 145223e668381..fd819f91aabc9 100644
--- a/test/new-e2e/tests/apm/vm_test.go
+++ b/test/new-e2e/tests/apm/vm_test.go
@@ -269,7 +269,7 @@ func waitRemotePort(v *VMFakeintakeSuite, port uint16) error {
v.Eventually(func() bool {
v.T().Logf("Waiting for remote:%v", port)
// TODO: Use the e2e context
- c, err = v.Env().RemoteHost.DialRemotePort(port)
+ c, err = v.Env().RemoteHost.DialPort(port)
if err != nil {
v.T().Logf("Failed to dial remote:%v: %s\n", port, err)
return false
diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go
index 92ffa59d86712..295c7651e43ac 100644
--- a/test/new-e2e/tests/containers/ecs_test.go
+++ b/test/new-e2e/tests/containers/ecs_test.go
@@ -62,7 +62,7 @@ func (suite *ecsSuite) SetupSuite() {
"ddtestworkload:deploy": auto.ConfigValue{Value: "true"},
}
- _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "ecs-cluster", stackConfig, ecs.Run, false, nil)
+ _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "ecs-cluster", ecs.Run, infra.WithConfigMap(stackConfig))
suite.Require().NoError(err)
fakeintake := &components.FakeIntake{}
diff --git a/test/new-e2e/tests/containers/eks_test.go b/test/new-e2e/tests/containers/eks_test.go
index da1e21b0bc028..53fafad333653 100644
--- a/test/new-e2e/tests/containers/eks_test.go
+++ b/test/new-e2e/tests/containers/eks_test.go
@@ -38,7 +38,7 @@ func (suite *eksSuite) SetupSuite() {
"dddogstatsd:deploy": auto.ConfigValue{Value: "true"},
}
- _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "eks-cluster", stackConfig, eks.Run, false, nil)
+ _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "eks-cluster", eks.Run, infra.WithConfigMap(stackConfig))
if !suite.Assert().NoError(err) {
stackName, err := infra.GetStackManager().GetPulumiStackName("eks-cluster")
suite.Require().NoError(err)
diff --git a/test/new-e2e/tests/containers/kindvm_test.go b/test/new-e2e/tests/containers/kindvm_test.go
index 8697c6c5edb27..762136cb3b820 100644
--- a/test/new-e2e/tests/containers/kindvm_test.go
+++ b/test/new-e2e/tests/containers/kindvm_test.go
@@ -38,7 +38,7 @@ func (suite *kindSuite) SetupSuite() {
"dddogstatsd:deploy": auto.ConfigValue{Value: "true"},
}
- _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "kind-cluster", stackConfig, kindvm.Run, false, nil)
+ _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "kind-cluster", kindvm.Run, infra.WithConfigMap(stackConfig))
if !suite.Assert().NoError(err) {
stackName, err := infra.GetStackManager().GetPulumiStackName("kind-cluster")
suite.Require().NoError(err)
diff --git a/test/new-e2e/tests/cws/fargate_test.go b/test/new-e2e/tests/cws/fargate_test.go
index 3f9896d2d483c..0760a8e075bc9 100644
--- a/test/new-e2e/tests/cws/fargate_test.go
+++ b/test/new-e2e/tests/cws/fargate_test.go
@@ -83,7 +83,7 @@ func TestECSFargate(t *testing.T) {
// Setup agent API key
apiKeyParam, err := ssm.NewParameter(ctx, awsEnv.Namer.ResourceName("agent-apikey"), &ssm.ParameterArgs{
- Name: awsEnv.CommonNamer.DisplayName(1011, pulumi.String("agent-apikey")),
+ Name: awsEnv.CommonNamer().DisplayName(1011, pulumi.String("agent-apikey")),
Type: ssm.ParameterTypeSecureString,
Value: awsEnv.AgentAPIKey(),
}, awsEnv.WithProviders(configCommon.ProviderAWS, configCommon.ProviderAWSX))
@@ -217,7 +217,7 @@ func TestECSFargate(t *testing.T) {
TaskRole: &awsx.DefaultRoleWithPolicyArgs{
RoleArn: pulumi.StringPtr(awsEnv.ECSTaskRole()),
},
- Family: awsEnv.CommonNamer.DisplayName(255, pulumi.String("cws-task")),
+ Family: awsEnv.CommonNamer().DisplayName(255, pulumi.String("cws-task")),
}, awsEnv.WithProviders(configCommon.ProviderAWS, configCommon.ProviderAWSX))
if err != nil {
return err
diff --git a/test/new-e2e/tests/ndm/snmp/snmp_test.go b/test/new-e2e/tests/ndm/snmp/snmp_test.go
index 3abf7627ab308..8df935ded32ac 100644
--- a/test/new-e2e/tests/ndm/snmp/snmp_test.go
+++ b/test/new-e2e/tests/ndm/snmp/snmp_test.go
@@ -16,6 +16,7 @@ import (
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
"github.com/stretchr/testify/assert"
+ "github.com/DataDog/test-infra-definitions/common/utils"
"github.com/DataDog/test-infra-definitions/components/datadog/agent"
"github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams"
"github.com/DataDog/test-infra-definitions/components/docker"
@@ -77,8 +78,7 @@ func snmpDockerProvisioner() e2e.Provisioner {
if err != nil {
return err
}
- dontUseSudo := false
- fileCommand, err := filemanager.CopyInlineFile(pulumi.String(fileContent), path.Join(dataPath, fileName), dontUseSudo,
+ fileCommand, err := filemanager.CopyInlineFile(pulumi.String(fileContent), path.Join(dataPath, fileName),
pulumi.DependsOn([]pulumi.Resource{createDataDirCommand}))
if err != nil {
return err
@@ -91,14 +91,18 @@ func snmpDockerProvisioner() e2e.Provisioner {
return err
}
// edit snmp config file
- dontUseSudo := false
- configCommand, err := filemanager.CopyInlineFile(pulumi.String(snmpConfig), path.Join(configPath, "snmp.yaml"), dontUseSudo,
+ configCommand, err := filemanager.CopyInlineFile(pulumi.String(snmpConfig), path.Join(configPath, "snmp.yaml"),
pulumi.DependsOn([]pulumi.Resource{createConfigDirCommand}))
if err != nil {
return err
}
- dockerManager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, host)
+ installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, host)
+ if err != nil {
+ return err
+ }
+
+ dockerManager, err := docker.NewManager(&awsEnv, host, utils.PulumiDependsOn(installEcrCredsHelperCmd))
if err != nil {
return err
}
@@ -106,7 +110,7 @@ func snmpDockerProvisioner() e2e.Provisioner {
envVars := pulumi.StringMap{"DATA_DIR": pulumi.String(dataPath), "CONFIG_DIR": pulumi.String(configPath)}
composeDependencies := []pulumi.Resource{createDataDirCommand, configCommand}
composeDependencies = append(composeDependencies, fileCommands...)
- dockerAgent, err := agent.NewDockerAgent(*awsEnv.CommonEnvironment, host, dockerManager,
+ dockerAgent, err := agent.NewDockerAgent(&awsEnv, host, dockerManager,
dockeragentparams.WithFakeintake(fakeIntake),
dockeragentparams.WithExtraComposeManifest("snmpsim", pulumi.String(snmpCompose)),
dockeragentparams.WithEnvironmentVariables(envVars),
diff --git a/test/new-e2e/tests/npm/ec2_1host_containerized_test.go b/test/new-e2e/tests/npm/ec2_1host_containerized_test.go
index 47f1a57c6a07d..49890e52ed019 100644
--- a/test/new-e2e/tests/npm/ec2_1host_containerized_test.go
+++ b/test/new-e2e/tests/npm/ec2_1host_containerized_test.go
@@ -36,13 +36,12 @@ func dockerHostHttpbinEnvProvisioner() e2e.PulumiEnvRunFunc[dockerHostNginxEnv]
if err != nil {
return err
}
- env.DockerHost.AwsEnvironment = &awsEnv
opts := []awsdocker.ProvisionerOption{
awsdocker.WithAgentOptions(systemProbeConfigNPMEnv()...),
}
params := awsdocker.GetProvisionerParams(opts...)
- awsdocker.Run(ctx, &env.DockerHost, params)
+ awsdocker.Run(ctx, &env.DockerHost, awsdocker.RunParams{Environment: &awsEnv, ProvisionerParams: params})
vmName := "httpbinvm"
@@ -56,7 +55,7 @@ func dockerHostHttpbinEnvProvisioner() e2e.PulumiEnvRunFunc[dockerHostNginxEnv]
}
// install docker.io
- manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, nginxHost)
+ manager, err := docker.NewManager(&awsEnv, nginxHost)
if err != nil {
return err
}
diff --git a/test/new-e2e/tests/npm/ec2_1host_selinux_test.go b/test/new-e2e/tests/npm/ec2_1host_selinux_test.go
index c03cbee997b32..b0c6fe015253d 100644
--- a/test/new-e2e/tests/npm/ec2_1host_selinux_test.go
+++ b/test/new-e2e/tests/npm/ec2_1host_selinux_test.go
@@ -60,7 +60,7 @@ func (v *ec2VMSELinuxSuite) SetupSuite() {
v.Env().RemoteHost.MustExecute("sudo yum install -y docker-ce docker-ce-cli")
v.Env().RemoteHost.MustExecute("sudo systemctl start docker")
v.Env().RemoteHost.MustExecute("sudo usermod -a -G docker $(whoami)")
- v.Env().RemoteHost.ReconnectSSH()
+ v.Env().RemoteHost.Reconnect()
// prefetch docker image locally
v.Env().RemoteHost.MustExecute("docker pull ghcr.io/datadog/apps-npm-tools:main")
diff --git a/test/new-e2e/tests/npm/ec2_1host_test.go b/test/new-e2e/tests/npm/ec2_1host_test.go
index 49e356d38e731..29e9e424c8731 100644
--- a/test/new-e2e/tests/npm/ec2_1host_test.go
+++ b/test/new-e2e/tests/npm/ec2_1host_test.go
@@ -36,7 +36,6 @@ func hostDockerHttpbinEnvProvisioner(opt ...awshost.ProvisionerOption) e2e.Pulum
if err != nil {
return err
}
- env.Host.AwsEnvironment = &awsEnv
opts := []awshost.ProvisionerOption{
awshost.WithAgentOptions(agentparams.WithSystemProbeConfig(systemProbeConfigNPM)),
@@ -45,7 +44,7 @@ func hostDockerHttpbinEnvProvisioner(opt ...awshost.ProvisionerOption) e2e.Pulum
opts = append(opts, opt...)
}
params := awshost.GetProvisionerParams(opts...)
- awshost.Run(ctx, &env.Host, params)
+ awshost.Run(ctx, &env.Host, awshost.RunParams{Environment: &awsEnv, ProvisionerParams: params})
vmName := "httpbinvm"
@@ -59,7 +58,7 @@ func hostDockerHttpbinEnvProvisioner(opt ...awshost.ProvisionerOption) e2e.Pulum
}
// install docker.io
- manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, nginxHost)
+ manager, err := docker.NewManager(&awsEnv, nginxHost)
if err != nil {
return err
}
@@ -91,7 +90,7 @@ func (v *ec2VMSuite) SetupSuite() {
v.Env().RemoteHost.MustExecute("sudo apt install -y apache2-utils docker.io")
v.Env().RemoteHost.MustExecute("sudo usermod -a -G docker ubuntu")
- v.Env().RemoteHost.ReconnectSSH()
+ v.Env().RemoteHost.Reconnect()
// prefetch docker image locally
v.Env().RemoteHost.MustExecute("docker pull ghcr.io/datadog/apps-npm-tools:main")
diff --git a/test/new-e2e/tests/npm/ecs_1host_test.go b/test/new-e2e/tests/npm/ecs_1host_test.go
index 704d8c1aeab34..6e6abc90e609b 100644
--- a/test/new-e2e/tests/npm/ecs_1host_test.go
+++ b/test/new-e2e/tests/npm/ecs_1host_test.go
@@ -10,6 +10,8 @@ import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+ tifEcs "github.com/DataDog/test-infra-definitions/scenarios/aws/ecs"
+
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments"
@@ -18,6 +20,7 @@ import (
npmtools "github.com/DataDog/test-infra-definitions/components/datadog/apps/npm-tools"
"github.com/DataDog/test-infra-definitions/components/datadog/ecsagentparams"
"github.com/DataDog/test-infra-definitions/components/docker"
+ ecsComp "github.com/DataDog/test-infra-definitions/components/ecs"
"github.com/DataDog/test-infra-definitions/resources/aws"
"github.com/DataDog/test-infra-definitions/scenarios/aws/ec2"
)
@@ -39,7 +42,6 @@ func ecsHttpbinEnvProvisioner() e2e.PulumiEnvRunFunc[ecsHttpbinEnv] {
if err != nil {
return err
}
- env.ECS.AwsEnvironment = &awsEnv
vmName := "httpbinvm"
nginxHost, err := ec2.NewVM(awsEnv, vmName)
@@ -52,7 +54,7 @@ func ecsHttpbinEnvProvisioner() e2e.PulumiEnvRunFunc[ecsHttpbinEnv] {
}
// install docker.io
- manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, nginxHost)
+ manager, err := docker.NewManager(&awsEnv, nginxHost)
if err != nil {
return err
}
@@ -64,17 +66,15 @@ func ecsHttpbinEnvProvisioner() e2e.PulumiEnvRunFunc[ecsHttpbinEnv] {
}
params := envecs.GetProvisionerParams(
- envecs.WithECSLinuxECSOptimizedNodeGroup(),
+ envecs.WithAwsEnv(&awsEnv),
+ envecs.WithECSOptions(tifEcs.WithLinuxNodeGroup()),
envecs.WithAgentOptions(ecsagentparams.WithAgentServiceEnvVariable("DD_SYSTEM_PROBE_NETWORK_ENABLED", "true")),
+ envecs.WithWorkloadApp(func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error) {
+ testURL := "http://" + env.HTTPBinHost.Address + "/"
+ return npmtools.EcsAppDefinition(e, clusterArn, testURL)
+ }),
)
envecs.Run(ctx, &env.ECS, params)
-
- // Workload
- testURL := "http://" + env.HTTPBinHost.Address + "/"
- if _, err := npmtools.EcsAppDefinition(awsEnv, env.ClusterArn, testURL); err != nil {
- return err
- }
-
return nil
}
}
diff --git a/test/new-e2e/tests/orchestrator/apply.go b/test/new-e2e/tests/orchestrator/apply.go
index 7857fdb20d43e..1f1f90dc1dbd2 100644
--- a/test/new-e2e/tests/orchestrator/apply.go
+++ b/test/new-e2e/tests/orchestrator/apply.go
@@ -40,7 +40,7 @@ func Apply(ctx *pulumi.Context) error {
// Deploy testing workload
if awsEnv.TestingWorkloadDeploy() {
- if _, err := redis.K8sAppDefinition(*awsEnv.CommonEnvironment, kindKubeProvider, "workload-redis", agentDependency); err != nil {
+ if _, err := redis.K8sAppDefinition(awsEnv, kindKubeProvider, "workload-redis", true, agentDependency); err != nil {
return fmt.Errorf("failed to install redis: %w", err)
}
}
@@ -62,7 +62,12 @@ func createCluster(ctx *pulumi.Context) (*resAws.Environment, *localKubernetes.C
return nil, nil, nil, err
}
- kindCluster, err := localKubernetes.NewKindCluster(*awsEnv.CommonEnvironment, vm, awsEnv.CommonNamer.ResourceName("kind"), "kind", awsEnv.KubernetesVersion())
+ installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, vm)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ kindCluster, err := localKubernetes.NewKindCluster(&awsEnv, vm, awsEnv.CommonNamer().ResourceName("kind"), "kind", awsEnv.KubernetesVersion(), utils.PulumiDependsOn(installEcrCredsHelperCmd))
if err != nil {
return nil, nil, nil, err
}
@@ -98,7 +103,7 @@ func deployAgent(ctx *pulumi.Context, awsEnv *resAws.Environment, cluster *local
if fakeIntake, err = fakeintake.NewECSFargateInstance(*awsEnv, cluster.Name(), fakeIntakeOptions...); err != nil {
return nil, err
}
- if err := fakeIntake.Export(awsEnv.Ctx, nil); err != nil {
+ if err := fakeIntake.Export(awsEnv.Ctx(), nil); err != nil {
return nil, err
}
}
@@ -108,7 +113,7 @@ func deployAgent(ctx *pulumi.Context, awsEnv *resAws.Environment, cluster *local
// Deploy the agent
if awsEnv.AgentDeploy() {
customValues := fmt.Sprintf(agentCustomValuesFmt, clusterName)
- helmComponent, err := agent.NewHelmInstallation(*awsEnv.CommonEnvironment, agent.HelmInstallationArgs{
+ helmComponent, err := agent.NewHelmInstallation(awsEnv, agent.HelmInstallationArgs{
KubeProvider: kindKubeProvider,
Namespace: "datadog",
ValuesYAML: pulumi.AssetOrArchiveArray{
@@ -129,7 +134,7 @@ func deployAgent(ctx *pulumi.Context, awsEnv *resAws.Environment, cluster *local
// Deploy standalone dogstatsd
if awsEnv.DogstatsdDeploy() {
- if _, err := dogstatsdstandalone.K8sAppDefinition(*awsEnv.CommonEnvironment, kindKubeProvider, "dogstatsd-standalone", fakeIntake, false, clusterName); err != nil {
+ if _, err := dogstatsdstandalone.K8sAppDefinition(awsEnv, kindKubeProvider, "dogstatsd-standalone", fakeIntake, false, clusterName); err != nil {
return nil, err
}
}
diff --git a/test/new-e2e/tests/orchestrator/suite_test.go b/test/new-e2e/tests/orchestrator/suite_test.go
index 11c72419c100e..448561b8c1954 100644
--- a/test/new-e2e/tests/orchestrator/suite_test.go
+++ b/test/new-e2e/tests/orchestrator/suite_test.go
@@ -78,7 +78,7 @@ func (suite *k8sSuite) SetupSuite() {
fmt.Fprint(os.Stderr, err.Error())
}
}
- _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "orch-kind-cluster", stackConfig, Apply, false, nil)
+ _, stackOutput, err := infra.GetStackManager().GetStackNoDeleteOnFailure(ctx, "orch-kind-cluster", Apply, infra.WithConfigMap(stackConfig))
suite.printKubeConfig(stackOutput)
diff --git a/test/new-e2e/tests/windows/base_agent_installer_suite.go b/test/new-e2e/tests/windows/base_agent_installer_suite.go
index ef01430bba4d3..51329c74480be 100644
--- a/test/new-e2e/tests/windows/base_agent_installer_suite.go
+++ b/test/new-e2e/tests/windows/base_agent_installer_suite.go
@@ -7,12 +7,13 @@
package windows
import (
+ "path/filepath"
+
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner"
platformCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common"
windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent"
- "path/filepath"
)
// BaseAgentInstallerSuite is a base class for the Windows Agent installer suites
@@ -36,7 +37,7 @@ func (b *BaseAgentInstallerSuite[Env]) InstallAgent(host *components.RemoteHost,
// NewTestClientForHost creates a new TestClient for a given host.
func (b *BaseAgentInstallerSuite[Env]) NewTestClientForHost(host *components.RemoteHost) *platformCommon.TestClient {
// We could bring the code from NewWindowsTestClient here
- return platformCommon.NewWindowsTestClient(b.T(), host)
+ return platformCommon.NewWindowsTestClient(b, host)
}
// BeforeTest overrides the base BeforeTest to perform some additional per-test setup like configuring the output directory.
@@ -44,7 +45,7 @@ func (b *BaseAgentInstallerSuite[Env]) BeforeTest(suiteName, testName string) {
b.BaseSuite.BeforeTest(suiteName, testName)
var err error
- b.OutputDir, err = runner.GetTestOutputDir(runner.GetProfile(), b.T())
+ b.OutputDir, err = runner.GetProfile().GetOutputDir()
if err != nil {
b.T().Fatalf("should get output dir")
}
diff --git a/test/new-e2e/tests/windows/command/agent.go b/test/new-e2e/tests/windows/command/agent.go
new file mode 100644
index 0000000000000..93f5bc9fd3dff
--- /dev/null
+++ b/test/new-e2e/tests/windows/command/agent.go
@@ -0,0 +1,37 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package command provides Windows command helpers
+package command
+
+const (
+ // DatadogCodeSignatureThumbprint is the thumbprint of the Datadog Code Signing certificate
+ // Valid From: May 2023
+ // Valid To: May 2025
+ DatadogCodeSignatureThumbprint = `B03F29CC07566505A718583E9270A6EE17678742`
+ // RegistryKeyPath is the root registry key that the Datadog Agent uses to store some state
+ RegistryKeyPath = "HKLM:\\SOFTWARE\\Datadog\\Datadog Agent"
+ // DefaultInstallPath is the default install path for the Datadog Agent
+ DefaultInstallPath = `C:\Program Files\Datadog\Datadog Agent`
+ // DefaultConfigRoot is the default config root for the Datadog Agent
+ DefaultConfigRoot = `C:\ProgramData\Datadog`
+ // DefaultAgentUserName is the default user name for the Datadog Agent
+ DefaultAgentUserName = `ddagentuser`
+)
+
+// GetDatadogAgentProductCode returns the product code GUID for the Datadog Agent
+func GetDatadogAgentProductCode() string {
+ return GetProductCodeByName("Datadog Agent")
+}
+
+// GetInstallPathFromRegistry gets the install path from the registry, e.g. C:\Program Files\Datadog\Datadog Agent
+func GetInstallPathFromRegistry() string {
+ return GetRegistryValue(RegistryKeyPath, "InstallPath")
+}
+
+// GetConfigRootFromRegistry gets the config root from the registry, e.g. C:\ProgramData\Datadog
+func GetConfigRootFromRegistry() string {
+ return GetRegistryValue(RegistryKeyPath, "ConfigRoot")
+}
diff --git a/test/new-e2e/tests/windows/command/doc.go b/test/new-e2e/tests/windows/command/doc.go
new file mode 100644
index 0000000000000..f7a93d30f9639
--- /dev/null
+++ b/test/new-e2e/tests/windows/command/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package command provides Windows command helpers
+package command
diff --git a/test/new-e2e/tests/windows/command/product.go b/test/new-e2e/tests/windows/command/product.go
new file mode 100644
index 0000000000000..6d875c8f171bb
--- /dev/null
+++ b/test/new-e2e/tests/windows/command/product.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package command provides Windows command helpers
+package command
+
+import (
+ "fmt"
+)
+
+// GetProductCodeByName returns the product code GUID for the given product name
+func GetProductCodeByName(name string) string {
+ // Read from registry instead of using Win32_Product, which has negative side effects
+ // https://gregramsey.net/2012/02/20/win32_product-is-evil/
+ return fmt.Sprintf(`(@(Get-ChildItem -Path "HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall" -Recurse ; Get-ChildItem -Path "HKLM:SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall" -Recurse ) | Where {$_.GetValue("DisplayName") -like "%s" }).PSChildName`, name)
+}
diff --git a/test/new-e2e/tests/windows/command/registry.go b/test/new-e2e/tests/windows/command/registry.go
new file mode 100644
index 0000000000000..6bb796c34d55f
--- /dev/null
+++ b/test/new-e2e/tests/windows/command/registry.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package command provides Windows command helpers
+package command
+
+import (
+ "fmt"
+)
+
+// GetRegistryValue returns a command string to get a registry value
+func GetRegistryValue(path string, value string) string {
+ return fmt.Sprintf("Get-ItemPropertyValue -Path '%s' -Name '%s'", path, value)
+}
+
+// RegistryKeyExists returns a command to check if a registry path exists
+func RegistryKeyExists(path string) string {
+ return fmt.Sprintf("Test-Path -Path '%s'", path)
+}
diff --git a/test/new-e2e/tests/windows/common/powershell/command_builder.go b/test/new-e2e/tests/windows/common/powershell/command_builder.go
index b4e17e5e3d949..9275c66827087 100644
--- a/test/new-e2e/tests/windows/common/powershell/command_builder.go
+++ b/test/new-e2e/tests/windows/common/powershell/command_builder.go
@@ -8,8 +8,9 @@ package powershell
import (
"fmt"
- "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
"strings"
+
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
)
type powerShellCommandBuilder struct {
@@ -18,6 +19,7 @@ type powerShellCommandBuilder struct {
// PsHost creates a new powerShellCommandBuilder object, which makes it easier to write PowerShell script.
//
+//revive:disable
//nolint:revive
func PsHost() *powerShellCommandBuilder {
return &powerShellCommandBuilder{
@@ -27,6 +29,8 @@ func PsHost() *powerShellCommandBuilder {
}
}
+//revive:enable
+
// GetLastBootTime uses the win32_operatingsystem Cim class to get the last time the computer was booted.
func (ps *powerShellCommandBuilder) GetLastBootTime() *powerShellCommandBuilder {
ps.cmds = append(ps.cmds, "(Get-CimInstance -ClassName win32_operatingsystem).lastbootuptime")
@@ -150,6 +154,34 @@ func (ps *powerShellCommandBuilder) WaitForServiceStatus(serviceName, status str
return ps
}
+// DisableWindowsDefender creates a command to try and disable Windows Defender without uninstalling it
+func (ps *powerShellCommandBuilder) DisableWindowsDefender() *powerShellCommandBuilder {
+ // ScheduleDay = 8 means never
+ ps.cmds = append(ps.cmds, `
+if ((Get-MpComputerStatus).IsTamperProtected) {
+ Write-Error "Windows Defender is tamper protected, unable to modify settings"
+}
+(@{DisableArchiveScanning = $true },
+ @{DisableRealtimeMonitoring = $true },
+ @{DisableBehaviorMonitoring = $true },
+ @{MAPSReporting = 0 },
+ @{ScanScheduleDay = 8 },
+ @{RemediationScheduleDay = 8 }
+) | ForEach-Object { Set-MpPreference @_ }`)
+ // Even though Microsoft claims to have deprecated this option as of Platform Version 4.18.2108.4,
+ // it still works for me on Platform Version 4.18.23110.3 after a reboot, so set it anywawy.
+ ps.cmds = append(ps.cmds, `mkdir -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows Defender"`)
+ ps.cmds = append(ps.cmds, `Set-ItemProperty -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows Defender" -Name DisableAntiSpyware -Value 1`)
+
+ return ps
+}
+
+// UninstallWindowsDefender creates a command to uninstall Windows Defender
+func (ps *powerShellCommandBuilder) UninstallWindowsDefender() *powerShellCommandBuilder {
+ ps.cmds = append(ps.cmds, "Uninstall-WindowsFeature -Name Windows-Defender")
+ return ps
+}
+
// Execute compiles the list of PowerShell commands into one script and runs it on the given host
func (ps *powerShellCommandBuilder) Execute(host *components.RemoteHost) (string, error) {
return host.Execute(ps.Compile())
diff --git a/test/new-e2e/tests/windows/components/defender/component.go b/test/new-e2e/tests/windows/components/defender/component.go
new file mode 100644
index 0000000000000..975b3d10654a3
--- /dev/null
+++ b/test/new-e2e/tests/windows/components/defender/component.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package defender contains code to control the behavior of Windows defender in the E2E tests
+package defender
+
+import (
+ "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/powershell"
+ "github.com/DataDog/test-infra-definitions/common"
+ "github.com/DataDog/test-infra-definitions/common/config"
+ "github.com/DataDog/test-infra-definitions/common/namer"
+ "github.com/DataDog/test-infra-definitions/common/utils"
+ "github.com/DataDog/test-infra-definitions/components/command"
+ "github.com/DataDog/test-infra-definitions/components/remote"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+// Manager contains the resources to manage Windows Defender
+type Manager struct {
+ namer namer.Namer
+ host *remote.Host
+ Resources []pulumi.Resource
+}
+
+// NewDefender creates a new instance of the Windows NewDefender component
+func NewDefender(e *config.CommonEnvironment, host *remote.Host, options ...Option) (*Manager, error) {
+ params, err := common.ApplyOption(&Configuration{}, options)
+ if err != nil {
+ return nil, err
+ }
+ manager := &Manager{
+ namer: e.CommonNamer().WithPrefix("windows-defender"),
+ host: host,
+ }
+ var deps []pulumi.ResourceOption
+ if params.Disabled {
+ cmd, err := host.OS.Runner().Command(manager.namer.ResourceName("disable-defender"), &command.Args{
+ Create: pulumi.String(powershell.PsHost().
+ DisableWindowsDefender().
+ Compile()),
+ }, deps...)
+ if err != nil {
+ return nil, err
+ }
+ deps = append(deps, utils.PulumiDependsOn(cmd))
+ manager.Resources = append(manager.Resources, cmd)
+ }
+ if params.Uninstall {
+ cmd, err := host.OS.Runner().Command(manager.namer.ResourceName("uninstall-defender"), &command.Args{
+ Create: pulumi.String(powershell.PsHost().
+ UninstallWindowsDefender().
+ Compile()),
+ }, deps...)
+ if err != nil {
+ return nil, err
+ }
+ manager.Resources = append(manager.Resources, cmd)
+ }
+ return manager, nil
+}
diff --git a/test/new-e2e/tests/windows/components/defender/params.go b/test/new-e2e/tests/windows/components/defender/params.go
new file mode 100644
index 0000000000000..91b8f363e64ef
--- /dev/null
+++ b/test/new-e2e/tests/windows/components/defender/params.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package defender contains the Windows NewDefender component configuration.
+package defender
+
+// Configuration represents the Windows NewDefender configuration
+type Configuration struct {
+ Disabled bool
+ Uninstall bool
+}
+
+// Option is an optional function parameter type for Configuration options
+type Option = func(*Configuration) error
+
+// WithDefenderDisabled configures the NewDefender component to disable Windows NewDefender
+func WithDefenderDisabled() func(*Configuration) error {
+ return func(p *Configuration) error {
+ p.Disabled = true
+ return nil
+ }
+}
+
+// WithDefenderUninstalled configures the NewDefender component to uninstall Windows NewDefender
+func WithDefenderUninstalled() func(*Configuration) error {
+ return func(p *Configuration) error {
+ p.Uninstall = true
+ return nil
+ }
+}
diff --git a/test/new-e2e/tests/windows/install-test/install_test.go b/test/new-e2e/tests/windows/install-test/install_test.go
index 498800913e9c9..6da2082880793 100644
--- a/test/new-e2e/tests/windows/install-test/install_test.go
+++ b/test/new-e2e/tests/windows/install-test/install_test.go
@@ -25,9 +25,10 @@ import (
componentos "github.com/DataDog/test-infra-definitions/components/os"
"github.com/DataDog/test-infra-definitions/scenarios/aws/ec2"
+ "testing"
+
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
- "testing"
)
type agentMSISuite struct {
@@ -250,7 +251,7 @@ func (is *agentMSISuite) TestAgentUser() {
for _, tc := range tcs {
if !is.Run(tc.testname, func() {
// subtest needs a new output dir
- is.OutputDir, err = runner.GetTestOutputDir(runner.GetProfile(), is.T())
+ is.OutputDir, err = runner.GetProfile().GetOutputDir()
is.Require().NoError(err, "should get output dir")
// initialize test helper
@@ -295,7 +296,7 @@ func (is *agentMSISuite) newTester(vm *components.RemoteHost, options ...TesterO
WithAgentPackage(is.AgentPackage),
}
testerOpts = append(testerOpts, options...)
- t, err := NewTester(is.T(), vm, testerOpts...)
+ t, err := NewTester(is, vm, testerOpts...)
is.Require().NoError(err, "should create tester")
return t
}
diff --git a/test/new-e2e/tests/windows/install-test/installtester.go b/test/new-e2e/tests/windows/install-test/installtester.go
index ed5d166a5a378..7deaaf0a58527 100644
--- a/test/new-e2e/tests/windows/install-test/installtester.go
+++ b/test/new-e2e/tests/windows/install-test/installtester.go
@@ -10,10 +10,11 @@ import (
"strings"
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/components"
+ "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e"
"github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common"
windows "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common"
windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent"
- "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/install-test/service-test"
+ servicetest "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/install-test/service-test"
"testing"
@@ -41,13 +42,14 @@ type Tester struct {
type TesterOption func(*Tester)
// NewTester creates a new Tester
-func NewTester(tt *testing.T, host *components.RemoteHost, opts ...TesterOption) (*Tester, error) {
+func NewTester(context e2e.Context, host *components.RemoteHost, opts ...TesterOption) (*Tester, error) {
t := &Tester{}
+ tt := context.T()
var err error
t.host = host
- t.InstallTestClient = common.NewWindowsTestClient(tt, t.host)
+ t.InstallTestClient = common.NewWindowsTestClient(context, t.host)
t.hostInfo, err = windows.GetHostInfo(t.host)
if err != nil {
return nil, err
diff --git a/test/system/dogstatsd/receive_and_forward_test.go b/test/system/dogstatsd/receive_and_forward_test.go
index 666704513ecd3..7a91b0cd15d34 100644
--- a/test/system/dogstatsd/receive_and_forward_test.go
+++ b/test/system/dogstatsd/receive_and_forward_test.go
@@ -7,20 +7,14 @@ package dogstatsd_test
import (
"encoding/json"
- "fmt"
"testing"
"time"
log "github.com/cihub/seelog"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl"
- "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck"
- "github.com/DataDog/datadog-agent/pkg/serializer/compression"
"github.com/DataDog/datadog-agent/pkg/serializer/compression/utils"
-
- pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)
func testMetadata(t *testing.T, d *dogstatsdTest) {
@@ -51,7 +45,7 @@ func TestReceiveAndForward(t *testing.T) {
"zstd": {kind: utils.ZstdKind},
}
- for name, tc := range tests {
+ for name := range tests {
t.Run(name, func(t *testing.T) {
d := setupDogstatsd(t)
defer d.teardown()
@@ -71,22 +65,22 @@ func TestReceiveAndForward(t *testing.T) {
requests := d.getRequests()
require.Len(t, requests, 1)
- mockConfig := pkgconfigsetup.Conf()
- mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind)
- strategy := compression.NewCompressorStrategy(mockConfig)
+ // mockConfig := mock.New(t)
+ // mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind)
+ // strategy := compression.NewCompressorStrategy(mockConfig)
- sc := []servicecheck.ServiceCheck{}
- decompressedBody, err := strategy.Decompress([]byte(requests[0]))
- require.NoError(t, err, "Could not decompress request body")
- err = json.Unmarshal(decompressedBody, &sc)
- require.NoError(t, err, fmt.Sprintf("Could not Unmarshal request body: %s", decompressedBody))
+ // sc := []servicecheck.ServiceCheck{}
+ // decompressedBody, err := strategy.Decompress([]byte(requests[0]))
+ // require.NoError(t, err, "Could not decompress request body")
+ // err = json.Unmarshal(decompressedBody, &sc)
+ // require.NoError(t, err, fmt.Sprintf("Could not Unmarshal request body: %s", decompressedBody))
- require.Len(t, sc, 2)
- assert.Equal(t, sc[0].CheckName, "test.ServiceCheck")
- assert.Equal(t, sc[0].Status, servicecheck.ServiceCheckOK)
+ // require.Len(t, sc, 2)
+ // assert.Equal(t, sc[0].CheckName, "test.ServiceCheck")
+ // assert.Equal(t, sc[0].Status, servicecheck.ServiceCheckOK)
- assert.Equal(t, sc[1].CheckName, "datadog.agent.up")
- assert.Equal(t, sc[1].Status, servicecheck.ServiceCheckOK)
+ // assert.Equal(t, sc[1].CheckName, "datadog.agent.up")
+ // assert.Equal(t, sc[1].Status, servicecheck.ServiceCheckOK)
})
}
}