diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bcf2998e991e3..584084c4d5fe0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -28,7 +28,7 @@ # Todo: is this file still needed? /Makefile.trace @DataDog/agent-platform -/release.json @DataDog/agent-platform @DataDog/agent-shared-components @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations +/release.json @DataDog/agent-platform @DataDog/agent-metrics-logs @DataDog/windows-kernel-integrations /requirements.txt @DataDog/agent-platform /pyproject.toml @DataDog/agent-platform /setup.cfg @DataDog/agent-platform @@ -306,16 +306,17 @@ /pkg/collector/corechecks/windows_event_log/ @DataDog/windows-agent /pkg/config/ @DataDog/agent-shared-components /pkg/config/config_template.yaml @DataDog/agent-shared-components @DataDog/documentation -/pkg/config/apm.go @DataDog/agent-apm +/pkg/config/setup/apm.go @DataDog/agent-apm /pkg/config/autodiscovery/ @Datadog/container-integrations /pkg/config/env @DataDog/container-integrations @DataDog/container-app /pkg/config/logs @Datadog/agent-shared-components @Datadog/agent-platform /pkg/config/logs/internal/seelog/seelog_config.go @Datadog/agent-shared-components -/pkg/config/process*.go @DataDog/processes -/pkg/config/system_probe.go @DataDog/ebpf-platform -/pkg/config/system_probe_cws.go @DataDog/agent-security -/pkg/config/system_probe_cws_notwin.go @DataDog/agent-security -/pkg/config/system_probe_cws_windows.go @DataDog/windows-kernel-integrations +/pkg/config/setup @DataDog/agent-shared-components +/pkg/config/setup/process*.go @DataDog/processes +/pkg/config/setup/system_probe.go @DataDog/ebpf-platform +/pkg/config/setup/system_probe_cws.go @DataDog/agent-security +/pkg/config/setup/system_probe_cws_notwin.go @DataDog/agent-security +/pkg/config/setup/system_probe_cws_windows.go @DataDog/windows-kernel-integrations /pkg/config/remote/ @DataDog/remote-config /pkg/config/remote/meta/ @DataDog/remote-config @DataDog/software-integrity-and-trust 
/pkg/containerlifecycle/ @Datadog/container-integrations @@ -340,7 +341,6 @@ /pkg/proto/datadog/trace @DataDog/agent-apm /pkg/remoteconfig/ @DataDog/remote-config /pkg/runtime/ @DataDog/agent-shared-components -/pkg/secrets/ @DataDog/agent-shared-components /pkg/serializer/ @DataDog/agent-metrics-logs /pkg/tagger/ @DataDog/container-integrations /pkg/tagset/ @DataDog/agent-shared-components diff --git a/.gitlab/e2e.yml b/.gitlab/e2e.yml index b0c99cc1f88db..d5c2c162bbfa8 100644 --- a/.gitlab/e2e.yml +++ b/.gitlab/e2e.yml @@ -207,6 +207,20 @@ new-e2e-agent-shared-components-main: # Temporary, until we manage to stabilize those tests. allow_failure: true +.agent-subcommands-tests-matrix: &agent-subcommands-tests-matrix + parallel: + matrix: + - EXTRA_PARAMS: --run TestSubcommandSuite + - EXTRA_PARAMS: --run TestAgentSecretSuite + - EXTRA_PARAMS: --run TestAgentConfigSuite + - EXTRA_PARAMS: --run TestAgentHostnameEC2Suite + - EXTRA_PARAMS: --run TestAgentDiagnoseEC2Suite + - EXTRA_PARAMS: --run TestAgentConfigCheckSuite + - EXTRA_PARAMS: --run TestLinuxFlareSuite + - EXTRA_PARAMS: --run TestWindowsFlareSuite + - EXTRA_PARAMS: --run TestLinuxSecretSuite + - EXTRA_PARAMS: --run TestWindowsSecretSuite + new-e2e-agent-subcommands-dev: extends: .new_e2e_template rules: !reference [.on_dev_branch_manual] @@ -214,6 +228,7 @@ new-e2e-agent-subcommands-dev: variables: TARGETS: ./tests/agent-subcommands TEAM: agent-shared-components + <<: *agent-subcommands-tests-matrix new-e2e-agent-subcommands-main: extends: .new_e2e_template @@ -224,6 +239,9 @@ new-e2e-agent-subcommands-main: TEAM: agent-shared-components # Temporary, until we manage to stabilize those tests. 
allow_failure: true + <<: *agent-subcommands-tests-matrix + + new-e2e-language-detection-dev: extends: .new_e2e_template diff --git a/.gitlab/e2e_test_junit_upload.yml b/.gitlab/e2e_test_junit_upload.yml index 2f3ce4e41b457..7bdcf421eac95 100644 --- a/.gitlab/e2e_test_junit_upload.yml +++ b/.gitlab/e2e_test_junit_upload.yml @@ -39,6 +39,23 @@ e2e_test_junit_upload: - new-e2e-agent-platform-install-script-centos-fips-a7-x86_64 - new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64 - new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64 + - new-e2e-agent-platform-step-by-step-debian-a7-x64 + - new-e2e-agent-platform-step-by-step-debian-a7-arm64 + - new-e2e-agent-platform-step-by-step-debian-a6-x86_64 + - new-e2e-agent-platform-step-by-step-debian-a6-arm64 + - new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64 + - new-e2e-agent-platform-step-by-step-ubuntu-a6-arm64 + - new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64 + - new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64 + - new-e2e-agent-platform-step-by-step-suse-a6-x86_64 + - new-e2e-agent-platform-step-by-step-suse-a7-x86_64 + - new-e2e-agent-platform-step-by-step-suse-a7-arm64 + - new-e2e-agent-platform-step-by-step-centos-a6-x86_64 + - new-e2e-agent-platform-step-by-step-centos-a7-x86_64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a6-arm64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a7-x64 + - new-e2e-agent-platform-step-by-step-amazonlinux-a7-arm64 - new-e2e-npm-main - new-e2e-aml-main - new-e2e-process-main diff --git a/.gitlab/kitchen_testing/centos.yml b/.gitlab/kitchen_testing/centos.yml index a89eefef8dfb0..c743e22c958e5 100644 --- a/.gitlab/kitchen_testing/centos.yml +++ b/.gitlab/kitchen_testing/centos.yml @@ -99,38 +99,6 @@ # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_centos_step_by_step_agent-a6: - 
extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_without_fips_a6 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_centos_fips_step_by_step_agent-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_with_fips_a6 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_centos_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_without_fips_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - -kitchen_centos_fips_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_centos_with_fips_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - # Agent 5 RPMs won't install on CentOS/RHEL 8 in FIPS mode, so we always # run upgrade5 on all systems with FIPS off kitchen_centos_upgrade5_agent-a6: diff --git a/.gitlab/kitchen_testing/debian.yml b/.gitlab/kitchen_testing/debian.yml index 6f0fff4d2bd14..54c4bc196044b 100644 --- a/.gitlab/kitchen_testing/debian.yml +++ b/.gitlab/kitchen_testing/debian.yml @@ -85,20 +85,6 @@ kitchen_debian_install_script_heroku_agent-a6: # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_debian_step_by_step_agent-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_debian_a6_x64 - - .kitchen_test_step_by_step_agent - rules: !reference [.on_deploy_a6] - -kitchen_debian_step_by_step_agent-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_debian_a7_x64 - - .kitchen_test_step_by_step_agent - rules: !reference [.on_deploy_a7] - kitchen_debian_upgrade5_agent-a6: extends: - .kitchen_scenario_debian_a6_x64 diff --git a/.gitlab/kitchen_testing/suse.yml b/.gitlab/kitchen_testing/suse.yml index d3e66b7e6d82c..753e3eee7afbd 100644 --- a/.gitlab/kitchen_testing/suse.yml +++ b/.gitlab/kitchen_testing/suse.yml @@ -78,22 +78,6 @@ 
kitchen_suse_install_script_dogstatsd_x64-a7: # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_suse_step_by_step_agent_x64-a6: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_suse_x64_a6 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_suse_step_by_step_agent_x64-a7: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_suse_x64_a7 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - kitchen_suse_install_script_agent_arm64-a7: # Run install script test on branches, on a reduced number of platforms rules: diff --git a/.gitlab/kitchen_testing/ubuntu.yml b/.gitlab/kitchen_testing/ubuntu.yml index a32c1af238eae..42d2e3b4f3c41 100644 --- a/.gitlab/kitchen_testing/ubuntu.yml +++ b/.gitlab/kitchen_testing/ubuntu.yml @@ -68,38 +68,6 @@ # We only want to run step-by-step tests on deploy pipelines, # which is why they have a different rule (if_deploy_6/7) -kitchen_ubuntu_step_by_step_agent-a6_x64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a6_x64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_ubuntu_step_by_step_agent-a6_arm64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a6_arm64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a6] - -kitchen_ubuntu_step_by_step_agent-a7_x64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a7_x64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - -kitchen_ubuntu_step_by_step_agent-a7_arm64: - extends: - - .kitchen_os_with_cws - - .kitchen_scenario_ubuntu_a7_arm64 - - .kitchen_test_step_by_step_agent - rules: - !reference [.on_deploy_a7] - kitchen_ubuntu_upgrade5_agent-a6: extends: - .kitchen_scenario_ubuntu_a6_x64 diff --git a/.gitlab/new-e2e_common/testing.yml b/.gitlab/new-e2e_common/testing.yml index bc8867d5f13a9..4370f4e22c721 100644 --- 
a/.gitlab/new-e2e_common/testing.yml +++ b/.gitlab/new-e2e_common/testing.yml @@ -14,4 +14,10 @@ variables: TARGETS: ./tests/agent-platform/install-script TEAM: agent-platform + EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR --no-verbose + +.new-e2e_step_by_step: + variables: + TARGETS: ./tests/agent-platform/step-by-step + TEAM: agent-platform EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR diff --git a/.gitlab/new-e2e_testing/amazonlinux.yml b/.gitlab/new-e2e_testing/amazonlinux.yml index a88cf563d3d4f..5a4d312c4ab64 100644 --- a/.gitlab/new-e2e_testing/amazonlinux.yml +++ b/.gitlab/new-e2e_testing/amazonlinux.yml @@ -37,7 +37,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -48,7 +48,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-x86_64: new-e2e-agent-platform-install-script-amazonlinux-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -59,7 +59,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a6-arm64: new-e2e-agent-platform-install-script-amazonlinux-a7-x64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -72,7 +72,7 @@ new-e2e-agent-platform-install-script-amazonlinux-a7-x64: new-e2e-agent-platform-install-script-amazonlinux-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_amazonlinux @@ -82,3 +82,56 @@ new-e2e-agent-platform-install-script-amazonlinux-a7-arm64: !reference [.on_all_new-e2e_tests_a7] variables: FLAVOR: 
datadog-agent + + +new-e2e-agent-platform-step-by-step-amazonlinux-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a7-x64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-amazonlinux-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_amazonlinux + - .new-e2e_amazonlinux_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/centos.yml b/.gitlab/new-e2e_testing/centos.yml index 70c1170d6fd46..7cbe9b62e960d 100644 --- a/.gitlab/new-e2e_testing/centos.yml +++ b/.gitlab/new-e2e_testing/centos.yml @@ -39,7 +39,7 @@ new-e2e-agent-platform-install-script-centos-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -50,7 +50,7 @@ new-e2e-agent-platform-install-script-centos-a6-x86_64: new-e2e-agent-platform-install-script-centos-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -63,7 +63,7 @@ new-e2e-agent-platform-install-script-centos-a7-x86_64: 
new-e2e-agent-platform-install-script-centos-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -76,7 +76,7 @@ new-e2e-agent-platform-install-script-centos-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -87,7 +87,7 @@ new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -98,7 +98,7 @@ new-e2e-agent-platform-install-script-centos-fips-a6-x86_64: new-e2e-agent-platform-install-script-centos-fips-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -111,7 +111,7 @@ new-e2e-agent-platform-install-script-centos-fips-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -124,7 +124,7 @@ new-e2e-agent-platform-install-script-centos-fips-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_centos @@ -132,3 +132,29 @@ new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64: - .new-e2e_agent_a7 variables: FLAVOR: datadog-dogstatsd + +new-e2e-agent-platform-step-by-step-centos-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_centos + - .new-e2e_centos_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-centos-a7-x86_64: + stage: 
kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_centos + - .new-e2e_centos_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/debian.yml b/.gitlab/new-e2e_testing/debian.yml index bbceab7b4c661..9d3e6ccc752e8 100644 --- a/.gitlab/new-e2e_testing/debian.yml +++ b/.gitlab/new-e2e_testing/debian.yml @@ -1,4 +1,3 @@ - .new-e2e_os_debian: variables: E2E_PLATFORM: debian @@ -37,7 +36,7 @@ new-e2e-agent-platform-install-script-debian-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -48,7 +47,7 @@ new-e2e-agent-platform-install-script-debian-a6-x86_64: new-e2e-agent-platform-install-script-debian-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -59,7 +58,7 @@ new-e2e-agent-platform-install-script-debian-a6-arm64: new-e2e-agent-platform-install-script-debian-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -72,7 +71,7 @@ new-e2e-agent-platform-install-script-debian-a7-x86_64: new-e2e-agent-platform-install-script-debian-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -85,7 +84,7 @@ new-e2e-agent-platform-install-script-debian-a7-arm64: new-e2e-agent-platform-install-script-debian-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -98,7 +97,7 @@ new-e2e-agent-platform-install-script-debian-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_debian @@ -109,8 +108,9 @@ 
new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_debian - .new-e2e_debian_a6_x86_64 - .new-e2e_agent_a6 @@ -119,10 +119,63 @@ new-e2e-agent-platform-install-script-debian-heroku-agent-a6-x86_64: new-e2e-agent-platform-install-script-debian-heroku-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_debian - .new-e2e_debian_a7_x86_64 - .new-e2e_agent_a7 variables: FLAVOR: datadog-heroku-agent + +new-e2e-agent-platform-step-by-step-debian-a7-x64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-debian-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_debian + - .new-e2e_debian_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/suse.yml b/.gitlab/new-e2e_testing/suse.yml index b7e671034e458..8093f141f7bd4 100644 --- a/.gitlab/new-e2e_testing/suse.yml +++ 
b/.gitlab/new-e2e_testing/suse.yml @@ -35,7 +35,7 @@ new-e2e-agent-platform-install-script-suse-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -46,7 +46,7 @@ new-e2e-agent-platform-install-script-suse-a6-x86_64: new-e2e-agent-platform-install-script-suse-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -59,7 +59,7 @@ new-e2e-agent-platform-install-script-suse-a7-x86_64: new-e2e-agent-platform-install-script-suse-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -72,7 +72,7 @@ new-e2e-agent-platform-install-script-suse-a7-arm64: new-e2e-agent-platform-install-script-suse-iot-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -85,7 +85,7 @@ new-e2e-agent-platform-install-script-suse-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_suse @@ -94,3 +94,41 @@ new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-step-by-step-suse-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-suse-a7-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-suse-a7-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - 
.new-e2e_step_by_step + - .new-e2e_os_suse + - .new-e2e_suse_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/.gitlab/new-e2e_testing/ubuntu.yml b/.gitlab/new-e2e_testing/ubuntu.yml index 39819b5452841..d9b148674dfe0 100644 --- a/.gitlab/new-e2e_testing/ubuntu.yml +++ b/.gitlab/new-e2e_testing/ubuntu.yml @@ -9,6 +9,12 @@ TEAM: agent-platform EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR +.new-e2e_step_by_step: + variables: + TARGETS: ./tests/agent-platform/step-by-step + TEAM: agent-platform + EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --major-version $AGENT_MAJOR_VERSION --arch $E2E_ARCH --flavor $FLAVOR + .new-e2e_ubuntu_a6_x86_64: variables: E2E_ARCH: x86_64 @@ -43,7 +49,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -54,7 +60,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-x86_64: new-e2e-agent-platform-install-script-ubuntu-a6-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -65,7 +71,7 @@ new-e2e-agent-platform-install-script-ubuntu-a6-arm64: new-e2e-agent-platform-install-script-ubuntu-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -78,7 +84,7 @@ new-e2e-agent-platform-install-script-ubuntu-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-a7-arm64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -91,7 +97,7 @@ new-e2e-agent-platform-install-script-ubuntu-a7-arm64: new-e2e-agent-platform-install-script-ubuntu-iot-agent-a7-x86_64: 
stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -102,7 +108,7 @@ new-e2e-agent-platform-install-script-ubuntu-iot-agent-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-dogstatsd-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template - .new-e2e_install_script - .new-e2e_os_ubuntu @@ -113,8 +119,9 @@ new-e2e-agent-platform-install-script-ubuntu-dogstatsd-a7-x86_64: new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a6_x86_64 - .new-e2e_agent_a6 @@ -123,10 +130,63 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a6-x86_64: new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a7-x86_64: stage: kitchen_testing - extends: + extends: - .new_e2e_template + - .new-e2e_install_script - .new-e2e_os_ubuntu - .new-e2e_ubuntu_a7_x86_64 - .new-e2e_agent_a7 variables: FLAVOR: datadog-heroku-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a6-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a6_x86_64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a6-arm64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a6_arm64 + - .new-e2e_agent_a6 + rules: + !reference [.on_deploy_a6] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64: + stage: kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a7_x86_64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent + +new-e2e-agent-platform-step-by-step-ubuntu-a7-arm64: + stage: 
kitchen_testing + extends: + - .new_e2e_template + - .new-e2e_step_by_step + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a7_arm64 + - .new-e2e_agent_a7 + rules: + !reference [.on_deploy_a7] + variables: + FLAVOR: datadog-agent diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 8cd3a66569e9e..d1cd7367e7bb1 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,55 @@ Release Notes ============= +.. _Release Notes_7.50.0: + +7.50.0 / 6.50.0 +====== + +.. _Release Notes_7.50.0_New Features: + +New Features +------------ + +- Add language detection API handler to the cluster-agent. + +- Report `rate_limit_queries_remaining_min` telemetry from `external-metrics` server. + +- Added a new `--force` option to the `datadog-cluster-agent clusterchecks rebalance` command that allows you to force clustercheck rebalancing with utilization. + +- [Beta] Enable `APM` library injection in `cluster-agent` admission controller based on automatic language detection annotations. + + +.. _Release Notes_7.50.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- Show Autodiscovery information in the output of ``datadog-cluster-agent status``. + +- Added CreateContainerConfigError wait reason to the `kubernetes_state.container.status_report.count.waiting` metric + reported by the kubernetes_state_core check. + +- Release the Leader Election Lock on shutdown to make the initialization of future cluster-agents faster. + +- The Datadog cluster-agent container image is now using Ubuntu 23.10 mantic + as the base image. + + +.. _Release Notes_7.50.0_Bug Fixes: + +Bug Fixes +--------- + +- Fixed a bug in the ``kubernetes_state_core`` check that caused tag corruption when ``telemetry`` was set to ``true``. + +- Fix stale metrics being reported by kubernetes_state_core check in some rare cases. + +- Fixed a bug in the rebalancing of cluster checks. 
Checks that contained + secrets were never rebalanced when the Cluster Agent was configured to not + resolve check secrets (option ``secret_backend_skip_checks`` set to true). + + .. _Release Notes_7.49.0: 7.49.0 / 6.49.0 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 13d42f2390071..b98dd305a3bf7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,298 @@ Release Notes ============= +.. _Release Notes_7.50.0: + +7.50.0 / 6.50.0 +====== + +.. _Release Notes_7.50.0_Prelude: + +Prelude +------- + +Release on: 2023-12-18 + +- Please refer to the `7.50.0 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.50.0_Upgrade Notes: + +Upgrade Notes +------------- + +- The `win32_event_log check `_ + has moved from Python `(integrations-core#16108) `_ + to Go `(#20701 )`_. + All ``legacy_mode: false`` configuration options are backwards compatible except for some regular expressions + used in the ``included_messages`` and ``excluded_messages`` options. + For example, Go regular expressions do not support lookahead or lookbehind assertions. If you do not + use these options, then no configuration changes are necessary. + See the `Python regular expression docs `_ and the + `Go regular expression docs `_ for more information on + the supported regular expression syntax. + Set ``legacy_mode_v2: true`` to revert to the Python implementation of the check. The Python implementation + may be removed in a future version of the Agent. + + +.. _Release Notes_7.50.0_New Features: + +New Features +------------ + +- The orchestrator check is moving from the Process Agent to the Node Agent. In the next release, this new check will replace the current pod check in the Process Agent. You can start using this new check now by manually setting the environment variable ``DD_ORCHESTRATOR_EXPLORER_RUN_ON_NODE_AGENT`` to ``true``. 
+ +- Adds the following CPU manager metrics to the kubelet core check: `kubernetes_core.kubelet.cpu_manager.pinning_errors_total`, `kubernetes_core.kubelet.cpu_manager.pinning_requests_total`. + +- Add a diagnosis for connecting to the agent logs endpoints. This is accessible through the ``agent diagnose`` command. + +- Add FIPS mode support for Network Device Monitoring products + +- Added support for collecting Cloud Foundry container names without the Cluster Agent. + +- The Kubernetes State Metrics Core check now collects `kubernetes_state.ingress.tls`. + +- APM: Added a new endpoint tracer_flare/v1/. This endpoint acts as a + proxy to forward HTTP POST request from tracers to the serverless_flare + endpoint, allowing tracer flares to be triggered via remote config, improving + the support experience by automating the collection of logs. + +- CWS: Ability to send a signal to a process when a rule was triggered. + CWS: Add Kubernetes user session context to events, in particular the username, UID and groups of the user that ran the commands remotely. + +- A new rule post action - 'kill' - can now be used to send a specific + signal to a process that caused a rule to be triggered. By default, this + signal is SIGTERM. + + ``` + - id: my_rule + expression: ... + actions: + - kill: + signal: SIGUSR1 + ``` + +- Enable container image collection by default. + +- Enable container lifecycle events collection by default. + This feature helps stopped containers to be cleaned from Datadog faster. + +- [netflow] Allow collecting configurable fields for Netflow V9/IPFIX + +- Add support for Oracle 12.1 and Oracle 11. + +- Add monitoring of Oracle ASM disk groups. + +- Add metrics for monitoring Oracle resource manager. 
+ +- [corechecks/snmp] Load downloaded profiles + +- DBM: Add configuration option to SQL obfuscator to use go-sqllexer package to run SQL obfuscation and normalization + +- Support filtering metrics from endpoint and service checks based + on namespace when the `DD_CONTAINER_EXCLUDE_METRICS` environment + variable is set. + +- The Windows Event Log tailer saves its current position in an event log and + resumes reading from that location when the Agent restarts. This allows + the Agent to collect events created before the Agent starts. + + +.. _Release Notes_7.50.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- [corechecks/snmp] Support symbol modifiers for global metric tags and metadata tags. + +- Update the go-systemd package to the latest version (22.5.0). + +- Added default peer tags for APM stats aggregation which can be enabled through a new flag (`peer_tags_aggregation`). + +- Add a stop timeout to the Windows Agent services. If an Agent service + does not cleanly stop within 15 seconds after receiving a stop command + from the Service Control Manager, the service will hard stop. + The timeout can be configured by setting the DD_WINDOWS_SERVICE_STOP_TIMEOUT_SECONDS + environment variable. + Agent stop timeouts are logged to the Windows Event Log and can be monitored and alerted on. + +- APM: OTLP: Add support for custom container tags via resource attributes prefixed by `datadog.container.tag.*`. + +- Agents are now built with Go ``1.20.11``. + +- CWS: Support for Ubuntu 23.10. + CWS: Reduce memory usage of ring buffer on machines with more than 64 CPU cores. + CSPM: Move away from libapt to run Debian packages compliance checks. + +- DBM: Bump the minimum version of the `go-sqllexer` library to 0.0.7 to support collecting stored procedure names. + +- Add subcommand `diagnose show-metadata gohai` for gohai data + +- Upgraded JMXFetch to ``0.49.0`` which adds some more telemetry + and contains some small fixes. 
+ +- Netflow now supports the `datadog-agent status` command, providing + configuration information. Any configuration errors encountered will be + listed. + +- Emit `database_instance` tag with the value `host/cdb`. The goal is to show each database separately in the DBM entry page. Currently, the backend initializes `database_instance` to `host`. + Also, the Agent will emit the new `db_server` tag because we have to initialize the `host` tag to `host/cdb`. + +- Improve obfuscator formatting. Prevent spaces after parentheses. + Prevent spaces before `#` when `#` is a part of an identifier. + +- Emit query metrics with zero executions to capture long runners spanning over several sampling periods. + +- Impose a time limit on query metrics processing. After exceeding the default limit of 20s, the Agent stops emitting execution plans and fqt events. + +- Add `oracle.inactive_seconds` metric. Add tags with session attributes to `oracle.process_pga*` metrics. + +- Stop overriding peer.service with other attributes in OTel spans. + +- Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) + +- [snmp_listener] Enable ``collect_topology`` by default. + +- dbm: add SQL obfuscation options to give customers more control over how SQL is obfuscated and normalized. + - ``RemoveSpaceBetweenParentheses`` - remove spaces between parentheses. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. + - ``KeepNull`` - disable obfuscating null values with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. + - ``KeepBoolean`` - disable obfuscating boolean values with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. + - ``KeepPositionalParameter`` - disable obfuscating positional parameters with ?. This option is only valid when ``ObfuscationMode`` is ``obfuscate_only`` or ``obfuscate_and_normalize``. 
+ +- Add logic to support multiple tags created by a single label/annotation. + For example, add the following config to extract tags for chart_name and app_chart_name. + podLabelsAsTags: + chart_name: chart_name, app_chart_name + Note: the format must be a comma-separated list of tags. + +- The logs collection pipeline has been through a refactor to support + processing only the message content (instead of the whole raw message) + in the journald and Windows events tailers. + This feature is experimental and off by default since it changes how + existing `log_processing_rules` behaves with journald and Windows events + tailer. + Note that it will be switched on by default in a future release of the Agent. + A warning notifying about this is shown when the journald and Windows events + tailers are used with some `log_processing_rules`. + +- The Datadog agent container image is now using Ubuntu 23.10 mantic + as the base image. + +- The win32_event_log check now continuously collects and reports events instead of waiting for + ``min_collection_interval`` to collect. + ``min_collection_interval`` now controls how frequently the check attempts to reconnect + when the event subscription is in an error state. + + +.. _Release Notes_7.50.0_Deprecation Notes: + +Deprecation Notes +----------------- + +- Installing the Agent on Windows Server versions lower than 2016 and client versions lower than 10 is now deprecated. + +- The ``timeout`` option for the win32_event_log check is no longer applicable and can be removed. If the option + is set, the check logs a deprecation warning and ignores the option. + + +.. _Release Notes_7.50.0_Security Notes: + +Security Notes +-------------- + +- Fix ``CVE-2023-45283`` and ``CVE-2023-45284`` + +- Update OpenSSL from 3.0.11 to 3.0.12. + This addresses CVE-2023-5363. + + +.. _Release Notes_7.50.0_Bug Fixes: + +Bug Fixes +--------- + +- On Windows, uninstalling the Agent should not fail if the Datadog Agent registry key is missing. 
+ +- APM: OTLP: Only extract DD container tags from resource attributes. Previously, container tags were also extracted from span attributes. + +- APM: OTLP: Only add container tags in tag `_dd.tags.container`. Previously, container tags were also added as span tags. + +- Resolved an issue in the containerd collector where the SBOM collection did not correctly attach RepoTags and RepoDigests to the SBOM payload. + +- Add a workaround for a bug in a Windows API that can cause the Agent to + crash when collecting forwarded events from the Windows Event Log. + +- Resolve the issue with hostname resolution in the kube_apiserver provider when the useHostNetwork setting is enabled. + +- Fix an issue that prevented process ID (PID) from being associated with containers in Live Container View when the Agent is deployed in AWS Fargate. + +- APM: Fixed trace-agent not forwarding errors from remote configuration and reporting them all as 500s + +- On Windows, the `SE_DACL_AUTO_INHERITED` flag is reset on `%PROJECTLOCATION%` during upgrades and uninstalls. + +- Fixes a bug in the Windows NPM driver where NPM displays byte overcounts. + +- For USM on Windows, fixes the problem where paths were being erroneously + reported as truncated + +- Fixes journald log's Seek function to be set at the beginning or end upon initialization. + +- Fixed the cause of some crashes related to CPU instruction + incompatibility happening under certain CPUs when making calls to + the included libgmp library. + +- [kubelet] The Kubelet client no longer fails to initialize when the parameter ``kubelet_tls_verify`` is set to ``false`` with a misconfigured root certificate authority. + +- Fixes a bug where the process-agent process check command would fail to run + when language detection was enabled. + +- Document query metrics `metric_prefix` parameter. + +- Set the tag `dd.internal.resource:database_instance` to `host` instead of `host/cdb`. 
+ +- Switch to the new obfuscator where bugs such as getting an error when obfuscating `@!` and where comments on DMLs weren't being removed are fixed. + +- Fixes wrong values in Oracle query metrics data. Extreme cases had inflated statistics and missing statements. The affected statements were pure DML and PL/SQL statements. + +- Fix the bug that prevented Oracle DBM from working properly on AWS RDS non-multitenant instances. + +- Fix an issue that caused the win32_event_log check to not stop running when the rate of incoming event + records was higher than the ``timeout`` option. The ``timeout`` option is now deprecated. + +- The Windows Event Log tailer automatically recovers and is able to resume collecting + events when a log provider is reinstalled, which sometimes happens during Windows updates. + + +.. _Release Notes_7.49.1: + +7.49.1 / 6.49.1 +====== + +.. _Release Notes_7.49.1_Prelude: + +Prelude +------- + +Release on: 2023-11-15 + +- Please refer to the `7.49.1 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.49.1_Bug Fixes: + +Bug Fixes +--------- + +- CWS: add ``arch`` field into agent context included in CWS events. + +- APM: Fix a deadlock issue which can prevent the trace-agent from shutting down. + +- CWS: Fix the broken lineage check for process activity in CWS. + +- APM: fix a regression in the Trace Agent that caused container tagging + with UDS and cgroup v2 to fail. + + ..
_Release Notes_7.49.0: 7.49.0 / 6.49.0 diff --git a/Dockerfiles/agent/Dockerfile b/Dockerfiles/agent/Dockerfile index 4a97d00786b20..20e748987300a 100644 --- a/Dockerfiles/agent/Dockerfile +++ b/Dockerfiles/agent/Dockerfile @@ -46,7 +46,7 @@ COPY datadog-agent*_$TARGETARCH.deb / WORKDIR /output # Get s6-overlay -ENV S6_VERSION v1.22.1.0 +ENV S6_VERSION v2.2.0.3 ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL:+${GENERAL_ARTIFACTS_CACHE_BUCKET_URL}/s6-overlay} ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${JUST_CONTAINERS_DOWNLOAD_LOCATION:-https://github.com/just-containers/s6-overlay/releases/download} RUN apt install --no-install-recommends -y curl ca-certificates diff --git a/Dockerfiles/agent/s6.amd64.sha256 b/Dockerfiles/agent/s6.amd64.sha256 index c1ea1fd0816dd..117fcd5d3485e 100644 --- a/Dockerfiles/agent/s6.amd64.sha256 +++ b/Dockerfiles/agent/s6.amd64.sha256 @@ -1 +1 @@ -73f9779203310ddf9c5132546a1978e1a2b05990263b92ed2c34c1e258e2df6c \ No newline at end of file +a7076cf205b331e9f8479bbb09d9df77dbb5cd8f7d12e9b74920902e0c16dd98 \ No newline at end of file diff --git a/Dockerfiles/agent/s6.arm64.sha256 b/Dockerfiles/agent/s6.arm64.sha256 index 9c82a38fb8be1..5dace0033135c 100644 --- a/Dockerfiles/agent/s6.arm64.sha256 +++ b/Dockerfiles/agent/s6.arm64.sha256 @@ -1 +1 @@ -4eac8bfebdb004eaa0b5ff6a09eb0b24e308cc0d7f37912ab19d3d063be3279c \ No newline at end of file +84f585a100b610124bb80e441ef2dc2d68ac2c345fd393d75a6293e0951ccfc5 \ No newline at end of file diff --git a/cmd/agent/common/loader.go b/cmd/agent/common/loader.go index daa4867e1892a..7112f70a46cf1 100644 --- a/cmd/agent/common/loader.go +++ b/cmd/agent/common/loader.go @@ -65,9 +65,7 @@ func GetWorkloadmetaInit() workloadmeta.InitHelper { }) } -var ( - collectorOnce sync.Once -) +var collectorOnce sync.Once // LoadCollector instantiate the collector and init the global state 'Coll'. 
// @@ -77,7 +75,7 @@ func LoadCollector(senderManager sender.SenderManager) collector.Collector { collectorOnce.Do(func() { // create the Collector instance and start all the components // NOTICE: this will also setup the Python environment, if available - Coll = collector.NewCollector(senderManager, GetPythonPaths()...) + Coll = collector.NewCollector(senderManager, config.Datadog.GetDuration("check_cancel_timeout"), GetPythonPaths()...) }) return Coll } @@ -85,7 +83,6 @@ func LoadCollector(senderManager sender.SenderManager) collector.Collector { // LoadComponents configures several common Agent components: // tagger, collector, scheduler and autodiscovery func LoadComponents(senderManager sender.SenderManager, secretResolver secrets.Component, confdPath string) { - confSearchPaths := []string{ confdPath, filepath.Join(path.GetDistPath(), "conf.d"), diff --git a/cmd/agent/common/path/go.mod b/cmd/agent/common/path/go.mod new file mode 100644 index 0000000000000..285f70f53707c --- /dev/null +++ b/cmd/agent/common/path/go.mod @@ -0,0 +1,26 @@ +module github.com/DataDog/datadog-agent/cmd/agent/common/path + +go 1.20 + +replace ( + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil + +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/executable v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/log v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.0.0-00010101000000-000000000000 + golang.org/x/sys v0.14.0 +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.50.0-rc.4 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/fsnotify/fsnotify v1.7.0 // 
indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/cmd/agent/common/path/go.sum b/cmd/agent/common/path/go.sum new file mode 100644 index 0000000000000..82984d8628e53 --- /dev/null +++ b/cmd/agent/common/path/go.sum @@ -0,0 +1,16 @@ +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example b/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example index 1237b66f75e17..a66f53940b153 100644 --- 
a/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/windows_registry.d/conf.yaml.example @@ -145,14 +145,3 @@ instances: ## This is useful for cluster-level checks. # # empty_default_hostname: false - - ## @param metric_patterns - mapping - optional - ## A mapping of metrics to include or exclude, with each entry being a regular expression. - ## - ## Metrics defined in `exclude` will take precedence in case of overlap. - # - # metric_patterns: - # include: - # - - # exclude: - # - diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 07b4eadb3a7c7..f34601003ba02 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -53,6 +53,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors" "github.com/DataDog/datadog-agent/comp/dogstatsd" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder" @@ -66,8 +67,10 @@ import ( "github.com/DataDog/datadog-agent/comp/logs" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata" + "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" "github.com/DataDog/datadog-agent/comp/ndmtmp" "github.com/DataDog/datadog-agent/comp/netflow" @@ -198,6 +201,7 @@ func run(log log.Component, telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, + _ replay.Component, serverDebug 
dogstatsddebug.Component, forwarder defaultforwarder.Component, wmeta workloadmeta.Component, @@ -209,7 +213,9 @@ func run(log log.Component, cliParams *cliParams, logsAgent optional.Option[logsAgent.Component], otelcollector otelcollector.Component, + _ host.Component, invAgent inventoryagent.Component, + _ inventoryhost.Component, _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, @@ -435,7 +441,7 @@ func startAgent( } // init settings that can be changed at runtime - if err := initRuntimeSettings(serverDebug, invAgent); err != nil { + if err := initRuntimeSettings(serverDebug); err != nil { log.Warnf("Can't initiliaze the runtime settings: %v", err) } diff --git a/cmd/agent/subcommands/run/command_windows.go b/cmd/agent/subcommands/run/command_windows.go index 6b928d17e0c8b..26a36d9ac7e89 100644 --- a/cmd/agent/subcommands/run/command_windows.go +++ b/cmd/agent/subcommands/run/command_windows.go @@ -44,12 +44,15 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" + "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" + "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/runner" netflowServer "github.com/DataDog/datadog-agent/comp/netflow/server" otelcollector "github.com/DataDog/datadog-agent/comp/otelcol/collector" @@ -80,6 +83,7 @@ func 
StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error telemetry telemetry.Component, sysprobeconfig sysprobeconfig.Component, server dogstatsdServer.Component, + _ replay.Component, serverDebug dogstatsddebug.Component, wmeta workloadmeta.Component, rcclient rcclient.Component, @@ -89,7 +93,9 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error sharedSerializer serializer.MetricSerializer, otelcollector otelcollector.Component, demultiplexer demultiplexer.Component, + _ host.Component, invAgent inventoryagent.Component, + _ inventoryhost.Component, _ secrets.Component, invChecks inventorychecks.Component, _ netflowServer.Component, diff --git a/cmd/agent/subcommands/run/settings.go b/cmd/agent/subcommands/run/settings.go index 2f5857efc7f96..e63c92cb20e40 100644 --- a/cmd/agent/subcommands/run/settings.go +++ b/cmd/agent/subcommands/run/settings.go @@ -8,14 +8,13 @@ package run import ( "github.com/DataDog/datadog-agent/cmd/agent/subcommands/run/internal/settings" dogstatsddebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" ) // initRuntimeSettings builds the map of runtime settings configurable at runtime. 
-func initRuntimeSettings(serverDebug dogstatsddebug.Component, invAgent inventoryagent.Component) error { +func initRuntimeSettings(serverDebug dogstatsddebug.Component) error { // Runtime-editable settings must be registered here to dynamically populate command-line information - if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting(invAgent)); err != nil { + if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting()); err != nil { return err } if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewRuntimeMutexProfileFraction()); err != nil { diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index fe38311c2e122..569d9fdcac016 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -162,7 +162,7 @@ func start(log log.Component, config config.Component, telemetry telemetry.Compo }() // Setup healthcheck port - var healthPort = pkgconfig.Datadog.GetInt("health_port") + healthPort := pkgconfig.Datadog.GetInt("health_port") if healthPort > 0 { err := healthprobe.Serve(mainCtx, healthPort) if err != nil { @@ -232,15 +232,13 @@ func start(log log.Component, config config.Component, telemetry telemetry.Compo eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "datadog-cluster-agent"}) ctx := apiserver.ControllerContext{ - InformerFactory: apiCl.InformerFactory, - WPAClient: apiCl.WPAClient, - WPAInformerFactory: apiCl.WPAInformerFactory, - DDClient: apiCl.DDClient, - DDInformerFactory: apiCl.DynamicInformerFactory, - Client: apiCl.Cl, - IsLeaderFunc: le.IsLeader, - EventRecorder: eventRecorder, - StopCh: stopCh, + InformerFactory: apiCl.InformerFactory, + DynamicClient: apiCl.DynamicInformerCl, + DynamicInformerFactory: apiCl.DynamicInformerFactory, + Client: apiCl.InformerCl, + IsLeaderFunc: le.IsLeader, + EventRecorder: eventRecorder, + 
StopCh: stopCh, } if aggErr := apiserver.StartControllers(ctx); aggErr != nil { @@ -344,7 +342,6 @@ func start(log log.Component, config config.Component, telemetry telemetry.Compo SecretInformers: apiCl.CertificateSecretInformerFactory, WebhookInformers: apiCl.WebhookConfigInformerFactory, Client: apiCl.Cl, - DiscoveryClient: apiCl.DiscoveryCl, StopCh: stopCh, } @@ -419,7 +416,7 @@ func start(log log.Component, config config.Component, telemetry telemetry.Compo // initRuntimeSettings builds the map of runtime Cluster Agent settings configurable at runtime. func initRuntimeSettings() error { - if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting(nil)); err != nil { + if err := commonsettings.RegisterRuntimeSetting(commonsettings.NewLogLevelRuntimeSetting()); err != nil { return err } diff --git a/cmd/cluster-agent/subcommands/start/compliance.go b/cmd/cluster-agent/subcommands/start/compliance.go index a6be8344d2225..ea2bde48e4ce8 100644 --- a/cmd/cluster-agent/subcommands/start/compliance.go +++ b/cmd/cluster-agent/subcommands/start/compliance.go @@ -124,7 +124,7 @@ func startCompliance(senderManager sender.SenderManager, stopper startstop.Stopp func wrapKubernetesClient(apiCl *apiserver.APIClient, isLeader func() bool) compliance.KubernetesProvider { return func(ctx context.Context) (dynamic.Interface, discovery.DiscoveryInterface, error) { if isLeader() { - return apiCl.DynamicCl, apiCl.DiscoveryCl, nil + return apiCl.DynamicCl, apiCl.Cl.Discovery(), nil } return nil, nil, compliance.ErrIncompatibleEnvironment } diff --git a/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go b/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go new file mode 100644 index 0000000000000..4908862b7fbf6 --- /dev/null +++ b/cmd/cws-instrumentation/subcommands/selftestscmd/selftests.go @@ -0,0 +1,81 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package selftestscmd holds the selftests command of CWS injector +package selftestscmd + +import ( + "errors" + "os" + "os/exec" + "strings" + + "github.com/spf13/cobra" +) + +type execParams struct { + enabled bool + path string + args string +} + +type openParams struct { + enabled bool + path string +} + +type selftestsCliParams struct { + exec execParams + open openParams +} + +// Command returns the commands for the selftests subcommand +func Command() []*cobra.Command { + var params selftestsCliParams + + selftestsCmd := &cobra.Command{ + Use: "selftests", + Short: "run selftests against the tracer", + RunE: func(cmd *cobra.Command, args []string) error { + var err error + if params.exec.enabled { + err = errors.Join(err, selftestExec(¶ms.exec)) + } + if params.open.enabled { + err = errors.Join(err, selftestOpen(¶ms.open)) + } + return err + }, + } + + selftestsCmd.Flags().BoolVar(¶ms.exec.enabled, "exec", false, "run the exec selftest") + selftestsCmd.Flags().StringVar(¶ms.exec.path, "exec.path", "/usr/bin/date", "path to the file to execute") + selftestsCmd.Flags().StringVar(¶ms.exec.args, "exec.args", "", "arguments to pass to the executable") + selftestsCmd.Flags().BoolVar(¶ms.open.enabled, "open", false, "run the open selftest") + selftestsCmd.Flags().StringVar(¶ms.open.path, "open.path", "/tmp/open.test", "path to the file to open") + + return []*cobra.Command{selftestsCmd} +} + +func selftestExec(params *execParams) error { + if params.args != "" { + return exec.Command(params.path, strings.Split(params.args, " ")...).Run() + } + return exec.Command(params.path).Run() +} + +func selftestOpen(params *openParams) error { + f, createErr := os.OpenFile(params.path, os.O_CREATE|os.O_EXCL, 0400) + if createErr != nil { + f, openErr := os.Open(params.path) + if openErr != nil { + return 
errors.Join(createErr, openErr) + } + return f.Close() + } + return errors.Join(f.Close(), os.Remove(params.path)) +} diff --git a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go index 379573e97d4f4..29db0e7d7cb29 100644 --- a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go +++ b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go @@ -11,6 +11,7 @@ package tracecmd import ( "github.com/spf13/cobra" + "github.com/DataDog/datadog-agent/cmd/cws-instrumentation/subcommands/selftestscmd" "github.com/DataDog/datadog-agent/pkg/security/ptracer" ) @@ -58,5 +59,7 @@ func Command() []*cobra.Command { traceCmd.Flags().Int32Var(¶ms.UID, uid, -1, "uid used to start the tracee") traceCmd.Flags().Int32Var(¶ms.GID, gid, -1, "gid used to start the tracee") + traceCmd.AddCommand(selftestscmd.Command()...) + return []*cobra.Command{traceCmd} } diff --git a/cmd/security-agent/subcommands/check/command.go b/cmd/security-agent/subcommands/check/command.go index 1f8557512a0e2..531f070ee57b4 100644 --- a/cmd/security-agent/subcommands/check/command.go +++ b/cmd/security-agent/subcommands/check/command.go @@ -239,7 +239,7 @@ func dumpComplianceEvents(reportFile string, events []*compliance.CheckEvent) er if err != nil { return fmt.Errorf("could not marshal events map: %w", err) } - if err := os.WriteFile(reportFile, b, 0644); err != nil { + if err := os.WriteFile(reportFile, b, 0o644); err != nil { return fmt.Errorf("could not write report file in %q: %w", reportFile, err) } return nil @@ -275,7 +275,7 @@ func complianceKubernetesProvider(_ctx context.Context) (dynamic.Interface, disc if err != nil { return nil, nil, err } - return apiCl.DynamicCl, apiCl.DiscoveryCl, nil + return apiCl.DynamicCl, apiCl.Cl.Discovery(), nil } type fakeResolver struct { diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index 23eca06e719fc..6474059d1444a 100644 --- 
a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -308,7 +308,7 @@ func RunAgent(ctx context.Context, log log.Component, config config.Component, s } func initRuntimeSettings() error { - return settings.RegisterRuntimeSetting(settings.NewLogLevelRuntimeSetting(nil)) + return settings.RegisterRuntimeSetting(settings.NewLogLevelRuntimeSetting()) } // StopAgent stops the API server and clean up resources diff --git a/cmd/system-probe/modules/network_tracer.go b/cmd/system-probe/modules/network_tracer.go index 0a9a2f56b8835..16e1c002833dd 100644 --- a/cmd/system-probe/modules/network_tracer.go +++ b/cmd/system-probe/modules/network_tracer.go @@ -193,14 +193,12 @@ func (nt *networkTracer) Register(httpMux *module.Router) error { maps = strings.Split(listMaps, ",") } - ebpfMaps, err := nt.tracer.DebugEBPFMaps(maps...) + err := nt.tracer.DebugEBPFMaps(w, maps...) if err != nil { log.Errorf("unable to retrieve eBPF maps: %s", err) w.WriteHeader(500) return } - - utils.WriteAsJSON(w, ebpfMaps) }) httpMux.HandleFunc("/debug/conntrack/cached", func(w http.ResponseWriter, req *http.Request) { diff --git a/comp/core/config/component.go b/comp/core/config/component.go index 283935d3187c1..4b1f6a91b5220 100644 --- a/comp/core/config/component.go +++ b/comp/core/config/component.go @@ -16,26 +16,25 @@ package config import ( - "go.uber.org/fx" - - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "go.uber.org/fx" ) // team: agent-shared-components // LogConfig reads the logger config -type LogConfig config.Reader +type LogConfig pkgconfigmodel.Reader // Component is the component type. type Component interface { - config.Reader + pkgconfigmodel.Reader // Warnings returns config warnings collected during setup. 
- Warnings() *config.Warnings + Warnings() *pkgconfigmodel.Warnings // Object returns wrapped config - Object() config.Reader + Object() pkgconfigmodel.Reader } // Module defines the fx options for this component. diff --git a/comp/core/config/component_mock.go b/comp/core/config/component_mock.go index 843226e52a161..0819dca0169fc 100644 --- a/comp/core/config/component_mock.go +++ b/comp/core/config/component_mock.go @@ -20,16 +20,15 @@ package config import ( - "go.uber.org/fx" - - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "go.uber.org/fx" ) // Mock implements mock-specific methods. type Mock interface { Component - config.Writer + pkgconfigmodel.Writer } // MockModule defines the fx options for the mock component. diff --git a/comp/core/config/config.go b/comp/core/config/config.go index a74673cc88614..76d24398f48a3 100644 --- a/comp/core/config/config.go +++ b/comp/core/config/config.go @@ -9,23 +9,23 @@ import ( "os" "strings" - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "go.uber.org/fx" ) // Reader is a subset of Config that only allows reading of configuration -type Reader = config.Reader //nolint:revive +type Reader = pkgconfigmodel.Reader //nolint:revive // cfg implements the Component. type cfg struct { // this component is currently implementing a thin wrapper around pkg/config, // and uses globals in that package. 
- config.Config + pkgconfigmodel.Config // warnings are the warnings generated during setup - warnings *config.Warnings + warnings *pkgconfigmodel.Warnings } // configDependencies is an interface that mimics the fx-oriented dependencies struct @@ -69,16 +69,17 @@ func NewServerlessConfig(path string) (Component, error) { } func newConfig(deps dependencies) (Component, error) { - warnings, err := setupConfig(deps) + config := pkgconfigsetup.Datadog + warnings, err := setupConfig(config, deps) returnErrFct := func(e error) (Component, error) { if e != nil && deps.Params.ignoreErrors { if warnings == nil { - warnings = &config.Warnings{} + warnings = &pkgconfigmodel.Warnings{} } warnings.Err = e e = nil } - return &cfg{Config: config.Datadog, warnings: warnings}, e + return &cfg{Config: config, warnings: warnings}, e } if err != nil { @@ -86,18 +87,18 @@ func newConfig(deps dependencies) (Component, error) { } if deps.Params.configLoadSecurityAgent { - if err := config.Merge(deps.Params.securityAgentConfigFilePaths); err != nil { + if err := pkgconfigsetup.Merge(deps.Params.securityAgentConfigFilePaths, config); err != nil { return returnErrFct(err) } } - return &cfg{Config: config.Datadog, warnings: warnings}, nil + return &cfg{Config: config, warnings: warnings}, nil } -func (c *cfg) Warnings() *config.Warnings { +func (c *cfg) Warnings() *pkgconfigmodel.Warnings { return c.warnings } -func (c *cfg) Object() config.Reader { +func (c *cfg) Object() pkgconfigmodel.Reader { return c.Config } diff --git a/comp/core/config/config_mock.go b/comp/core/config/config_mock.go index 5e2e36465e9c7..963f7470ff49d 100644 --- a/comp/core/config/config_mock.go +++ b/comp/core/config/config_mock.go @@ -12,11 +12,11 @@ import ( "strings" "testing" - "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" 
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "go.uber.org/fx" ) type mockDependencies struct { @@ -37,41 +37,41 @@ func (m mockDependencies) getSecretResolver() secrets.Component { // newMock exported mock builder to allow modifying mocks that might be // supplied in tests and used for dep injection. func newMock(deps mockDependencies, t testing.TB) (Component, error) { - backupConfig := config.NewConfig("", "", strings.NewReplacer()) - backupConfig.CopyConfig(config.Datadog) + backupConfig := pkgconfigmodel.NewConfig("", "", strings.NewReplacer()) + backupConfig.CopyConfig(pkgconfigsetup.Datadog) - config.Datadog.CopyConfig(config.NewConfig("mock", "XXXX", strings.NewReplacer())) + pkgconfigsetup.Datadog.CopyConfig(pkgconfigmodel.NewConfig("mock", "XXXX", strings.NewReplacer())) env.SetFeatures(t, deps.Params.Features...) // call InitConfig to set defaults. - config.InitConfig(config.Datadog) + pkgconfigsetup.InitConfig(pkgconfigsetup.Datadog) c := &cfg{ - Config: config.Datadog, + Config: pkgconfigsetup.Datadog, } if !deps.Params.SetupConfig { if deps.Params.ConfFilePath != "" { - config.Datadog.SetConfigType("yaml") - err := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath)) + pkgconfigsetup.Datadog.SetConfigType("yaml") + err := pkgconfigsetup.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath)) if err != nil { // The YAML was invalid, fail initialization of the mock config. return nil, err } } } else { - warnings, _ := setupConfig(deps) + warnings, _ := setupConfig(pkgconfigsetup.Datadog, deps) c.warnings = warnings } // Overrides are explicit and will take precedence over any other // setting for k, v := range deps.Params.Overrides { - config.Datadog.SetWithoutSource(k, v) + pkgconfigsetup.Datadog.SetWithoutSource(k, v) } // swap the existing config back at the end of the test. 
- t.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) }) + t.Cleanup(func() { pkgconfigsetup.Datadog.CopyConfig(backupConfig) }) return c, nil } diff --git a/comp/core/config/go.mod b/comp/core/config/go.mod new file mode 100644 index 0000000000000..825ad61f839ed --- /dev/null +++ b/comp/core/config/go.mod @@ -0,0 +1,99 @@ +module github.com/DataDog/datadog-agent/comp/core/config + +go 1.20 + +replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../cmd/agent/common/path + github.com/DataDog/datadog-agent/comp/core/flare/types => ../flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../secrets/ + github.com/DataDog/datadog-agent/comp/core/telemetry => ../telemetry/ + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model/ + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/telemetry => ../../../pkg/telemetry + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional/ + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber/ + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/cmd/agent/common/path 
v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/secrets v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/env v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/config/setup v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.0.0-00010101000000-000000000000 + github.com/DataDog/viper v1.12.0 + github.com/stretchr/testify v1.8.4 + go.uber.org/fx v1.18.2 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/flare/types v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/telemetry v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.50.0-rc.4 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // 
indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + github.com/shirou/gopsutil/v3 v3.23.9 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.opentelemetry.io/otel v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect + go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/sdk v1.20.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.20.0 // indirect + go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.23.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.6.0 // indirect + google.golang.org/protobuf v1.31.0 // 
indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/comp/core/config/go.sum b/comp/core/config/go.sum new file mode 100644 index 0000000000000..1f74e5c4c0fb9 --- /dev/null +++ b/comp/core/config/go.sum @@ -0,0 +1,348 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.12.0 h1:FufyZpZPxyszafSV5B8Q8it75IhhuJwH0T7QpT6HnD0= +github.com/DataDog/viper v1.12.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks 
v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang 
v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 
h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 
h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= 
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= +go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= +go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk= +go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod 
h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/core/config/params_mock.go 
b/comp/core/config/params_mock.go index 529621570e5bb..a918de8fbdb2d 100644 --- a/comp/core/config/params_mock.go +++ b/comp/core/config/params_mock.go @@ -9,7 +9,7 @@ package config import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env" ) // MockParams defines the parameter for the mock config. @@ -24,7 +24,7 @@ type MockParams struct { Overrides map[string]interface{} // Features is a parameter to set features for the mock config - Features []config.Feature + Features []pkgconfigenv.Feature // SetupConfig sets up the config as if it weren't a mock; essentially a full init SetupConfig bool diff --git a/comp/core/config/setup.go b/comp/core/config/setup.go index 3c79bd0ca5697..0c2dc40b16a6b 100644 --- a/comp/core/config/setup.go +++ b/comp/core/config/setup.go @@ -12,13 +12,13 @@ import ( "runtime" "strings" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/viper" - - "github.com/DataDog/datadog-agent/pkg/config" ) // setupConfig is copied from cmd/agent/common/helpers.go. 
-func setupConfig(deps configDependencies) (*config.Warnings, error) { +func setupConfig(config pkgconfigmodel.Config, deps configDependencies) (*pkgconfigmodel.Warnings, error) { p := deps.getParams() confFilePath := p.ConfFilePath @@ -27,31 +27,31 @@ func setupConfig(deps configDependencies) (*config.Warnings, error) { defaultConfPath := p.defaultConfPath if configName != "" { - config.Datadog.SetConfigName(configName) + config.SetConfigName(configName) } // set the paths where a config file is expected if len(confFilePath) != 0 { // if the configuration file path was supplied on the command line, // add that first so it's first in line - config.Datadog.AddConfigPath(confFilePath) + config.AddConfigPath(confFilePath) // If they set a config file directly, let's try to honor that if strings.HasSuffix(confFilePath, ".yaml") || strings.HasSuffix(confFilePath, ".yml") { - config.Datadog.SetConfigFile(confFilePath) + config.SetConfigFile(confFilePath) } } if defaultConfPath != "" { - config.Datadog.AddConfigPath(defaultConfPath) + config.AddConfigPath(defaultConfPath) } // load the configuration var err error - var warnings *config.Warnings + var warnings *pkgconfigmodel.Warnings resolver := deps.getSecretResolver() if resolver == nil { - warnings, err = config.LoadWithoutSecret() + warnings, err = pkgconfigsetup.LoadWithoutSecret(config, pkgconfigsetup.SystemProbe.GetEnvVars()) } else { - warnings, err = config.LoadWithSecret(resolver) + warnings, err = pkgconfigsetup.LoadWithSecret(config, resolver, pkgconfigsetup.SystemProbe.GetEnvVars()) } // If `!failOnMissingFile`, do not issue an error if we cannot find the default config file. 
diff --git a/comp/core/status/statusimpl/common_header_provider_test.go b/comp/core/status/statusimpl/common_header_provider_test.go index dfd7d616047f0..02c6c76b18491 100644 --- a/comp/core/status/statusimpl/common_header_provider_test.go +++ b/comp/core/status/statusimpl/common_header_provider_test.go @@ -58,16 +58,6 @@ func TestCommonHeaderProviderJSON(t *testing.T) { assert.NotEqual(t, "", stats["title"]) } -var expectedTextOutput = fmt.Sprintf(` Status date: 2018-01-05 11:25:15 UTC (1515151515000) - Agent start: 2018-01-05 11:25:15 UTC (1515151515000) - Pid: %d - Go Version: %s - Python Version: n/a - Build arch: %s - Agent flavor: %s - Log Level: info -`, pid, goVersion, arch, agentFlavor) - func TestCommonHeaderProviderText(t *testing.T) { nowFunc = func() time.Time { return time.Unix(1515151515, 0) } startTimeProvider = time.Unix(1515151515, 0) @@ -84,6 +74,16 @@ func TestCommonHeaderProviderText(t *testing.T) { buffer := new(bytes.Buffer) provider.Text(buffer) + expectedTextOutput := fmt.Sprintf(` Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: %s + Log Level: info +`, pid, goVersion, arch, agentFlavor) + // We replace windows line break by linux so the tests pass on every OS expectedResult := strings.Replace(expectedTextOutput, "\r\n", "\n", -1) output := strings.Replace(buffer.String(), "\r\n", "\n", -1) diff --git a/comp/core/status/statusimpl/status.go b/comp/core/status/statusimpl/status.go index 56072ba7e732d..c3c6a859a52ab 100644 --- a/comp/core/status/statusimpl/status.go +++ b/comp/core/status/statusimpl/status.go @@ -62,13 +62,23 @@ func newStatus(deps dependencies) (status.Component, error) { // The exception is the collector section. 
We want that to be the first section to be displayed // We manually insert the collector section in the first place after sorting them alphabetically sortedSectionNames := []string{} + collectorSectionPresent := false + for _, provider := range deps.Providers { + if provider.Section() == status.CollectorSection && !collectorSectionPresent { + collectorSectionPresent = true + } + if !present(provider.Section(), sortedSectionNames) && provider.Section() != status.CollectorSection { sortedSectionNames = append(sortedSectionNames, provider.Section()) } } + sort.Strings(sortedSectionNames) - sortedSectionNames = append([]string{status.CollectorSection}, sortedSectionNames...) + + if collectorSectionPresent { + sortedSectionNames = append([]string{status.CollectorSection}, sortedSectionNames...) + } // Providers of each section are sort alphabetically by name sortedProvidersBySection := map[string][]status.Provider{} @@ -140,7 +150,6 @@ func (s *statusImplementation) GetStatus(format string, _ bool) ([]byte, error) } for _, section := range s.sortedSectionNames { - printHeader(b, section) newLine(b) @@ -221,10 +230,20 @@ func (s *statusImplementation) GetStatusBySection(section string, format string, err := sc.Text(b) if err != nil { - return b.Bytes(), err + errors = append(errors, err) } } + newLine(b) + + if len(errors) > 0 { + if err := renderErrors(b, errors); err != nil { + return []byte{}, err + } + + return b.Bytes(), nil + } + return b.Bytes(), nil case "html": var b = new(bytes.Buffer) diff --git a/comp/core/status/statusimpl/status_test.go b/comp/core/status/statusimpl/status_test.go index 47aaa3e2ee833..e9b9ee3169390 100644 --- a/comp/core/status/statusimpl/status_test.go +++ b/comp/core/status/statusimpl/status_test.go @@ -26,11 +26,12 @@ import ( ) type mockProvider struct { - data map[string]interface{} - text string - html string - name string - section string + data map[string]interface{} + text string + html string + name string + section string + 
returnError bool } func (m mockProvider) Name() string { @@ -42,6 +43,10 @@ func (m mockProvider) Section() string { } func (m mockProvider) JSON(stats map[string]interface{}) error { + if m.returnError { + return fmt.Errorf("JSON error") + } + for key, value := range m.data { stats[key] = value } @@ -50,21 +55,30 @@ func (m mockProvider) JSON(stats map[string]interface{}) error { } func (m mockProvider) Text(buffer io.Writer) error { + if m.returnError { + return fmt.Errorf("Text error") + } + _, err := buffer.Write([]byte(m.text)) return err } func (m mockProvider) HTML(buffer io.Writer) error { + if m.returnError { + return fmt.Errorf("HTML error") + } + _, err := buffer.Write([]byte(m.html)) return err } type mockHeaderProvider struct { - data map[string]interface{} - text string - html string - index int - name string + data map[string]interface{} + text string + html string + index int + name string + returnError bool } func (m mockHeaderProvider) Index() int { @@ -76,6 +90,10 @@ func (m mockHeaderProvider) Name() string { } func (m mockHeaderProvider) JSON(stats map[string]interface{}) error { + if m.returnError { + return fmt.Errorf("JSON error") + } + for key, value := range m.data { stats[key] = value } @@ -84,37 +102,23 @@ func (m mockHeaderProvider) JSON(stats map[string]interface{}) error { } func (m mockHeaderProvider) Text(buffer io.Writer) error { + if m.returnError { + return fmt.Errorf("Text error") + } + _, err := buffer.Write([]byte(m.text)) return err } func (m mockHeaderProvider) HTML(buffer io.Writer) error { + if m.returnError { + return fmt.Errorf("HTML error") + } + _, err := buffer.Write([]byte(m.html)) return err } -type errorMockProvider struct{} - -func (m errorMockProvider) Name() string { - return "error mock" -} - -func (m errorMockProvider) Section() string { - return "error section" -} - -func (m errorMockProvider) JSON(map[string]interface{}) error { - return fmt.Errorf("testing JSON errors") -} - -func (m errorMockProvider) 
Text(io.Writer) error { - return fmt.Errorf("testing Text errors") -} - -func (m errorMockProvider) HTML(io.Writer) error { - return fmt.Errorf("testing HTML errors") -} - var ( humanReadbaleFlavor = flavor.GetHumanReadableFlavor() agentVersion = version.AgentVersion @@ -129,41 +133,6 @@ var testTextHeader = fmt.Sprintf(`%s %s %s`, status.PrintDashes(testTitle, "="), testTitle, status.PrintDashes(testTitle, "=")) -var expectedStatusTextOutput = fmt.Sprintf(`%s - Status date: 2018-01-05 11:25:15 UTC (1515151515000) - Agent start: 2018-01-05 11:25:15 UTC (1515151515000) - Pid: %d - Go Version: %s - Python Version: n/a - Build arch: %s - Agent flavor: %s - Log Level: info - -========== -Header Foo -========== - header foo: header bar - header foo2: header bar 2 - -========= -Collector -========= - text from a - text from b - -========= -A Section -========= - text from a - -========= -X Section -========= - text from a - text from x - -`, testTextHeader, pid, goVersion, arch, agentFlavor) - func TestGetStatus(t *testing.T) { nowFunc = func() time.Time { return time.Unix(1515151515, 0) } startTimeProvider = time.Unix(1515151515, 0) @@ -272,6 +241,41 @@ func TestGetStatus(t *testing.T) { name: "Text", format: "text", assertFunc: func(t *testing.T, bytes []byte) { + expectedStatusTextOutput := fmt.Sprintf(`%s + Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: %s + Log Level: info + +========== +Header Foo +========== + header foo: header bar + header foo2: header bar 2 + +========= +Collector +========= + text from a + text from b + +========= +A Section +========= + text from a + +========= +X Section +========= + text from a + text from x + +`, testTextHeader, pid, goVersion, arch, agentFlavor) + // We replace windows line break by linux so the tests pass on every OS expectedResult := strings.Replace(expectedStatusTextOutput, 
"\r\n", "\n", -1) output := strings.Replace(string(bytes), "\r\n", "\n", -1) @@ -345,31 +349,63 @@ func TestGetStatus(t *testing.T) { } } -var expectedStatusTextErrorOutput = fmt.Sprintf(`%s +func TestGetStatusDoNotRenderHeaderIfNoProviders(t *testing.T) { + nowFunc = func() time.Time { return time.Unix(1515151515, 0) } + startTimeProvider = time.Unix(1515151515, 0) + originalTZ := os.Getenv("TZ") + os.Setenv("TZ", "UTC") + + defer func() { + nowFunc = time.Now + startTimeProvider = pkgConfig.StartTime + os.Setenv("TZ", originalTZ) + }() + + deps := fxutil.Test[dependencies](t, fx.Options( + config.MockModule(), + fx.Supply( + status.NewInformationProvider(mockProvider{ + data: map[string]interface{}{ + "foo": "bar", + }, + name: "a", + text: " text from a\n", + section: "section", + }), + ), + )) + + statusComponent, err := newStatus(deps) + + assert.NoError(t, err) + + bytesResult, err := statusComponent.GetStatus("text", false) + + assert.NoError(t, err) + + expectedOutput := fmt.Sprintf(`%s Status date: 2018-01-05 11:25:15 UTC (1515151515000) Agent start: 2018-01-05 11:25:15 UTC (1515151515000) Pid: %d Go Version: %s Python Version: n/a Build arch: %s - Agent flavor: agent + Agent flavor: %s Log Level: info -========= -Collector -========= - text from b +======= +Section +======= + text from a -============= -Error Section -============= +`, testTextHeader, pid, goVersion, arch, agentFlavor) -==================== -Status render errors -==================== - - testing Text errors + // We replace windows line break by linux so the tests pass on every OS + expectedResult := strings.Replace(expectedOutput, "\r\n", "\n", -1) + output := strings.Replace(string(bytesResult), "\r\n", "\n", -1) -`, testTextHeader, pid, goVersion, arch) + assert.Equal(t, expectedResult, output) +} func TestGetStatusWithErrors(t *testing.T) { nowFunc = func() time.Time { return time.Unix(1515151515, 0) } @@ -386,7 +422,11 @@ func TestGetStatusWithErrors(t *testing.T) { deps := 
fxutil.Test[dependencies](t, fx.Options( config.MockModule(), fx.Supply( - status.NewInformationProvider(errorMockProvider{}), + status.NewInformationProvider(mockProvider{ + section: "error section", + name: "a", + returnError: true, + }), status.NewInformationProvider(mockProvider{ data: map[string]interface{}{ "foo2": "bar2", @@ -416,13 +456,39 @@ func TestGetStatusWithErrors(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, "testing JSON errors", result["errors"].([]interface{})[0].(string)) + assert.Equal(t, "JSON error", result["errors"].([]interface{})[0].(string)) }, }, { name: "Text", format: "text", assertFunc: func(t *testing.T, bytes []byte) { + expectedStatusTextErrorOutput := fmt.Sprintf(`%s + Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: agent + Log Level: info + +========= +Collector +========= + text from b + +============= +Error Section +============= + +==================== +Status render errors +==================== + - Text error + +`, testTextHeader, pid, goVersion, arch) + // We replace windows line break by linux so the tests pass on every OS expectedResult := strings.Replace(expectedStatusTextErrorOutput, "\r\n", "\n", -1) output := strings.Replace(string(bytes), "\r\n", "\n", -1) @@ -603,7 +669,16 @@ func TestGetStatusBySectionsWithErrors(t *testing.T) { deps := fxutil.Test[dependencies](t, fx.Options( config.MockModule(), fx.Supply( - status.NewInformationProvider(errorMockProvider{}), + status.NewInformationProvider(mockProvider{ + returnError: true, + section: "error section", + name: "a", + }), + status.NewHeaderInformationProvider(mockHeaderProvider{ + returnError: true, + name: "a", + index: 3, + }), status.NewInformationProvider(mockProvider{ data: map[string]interface{}{ "foo2": "bar2", @@ -622,23 +697,26 @@ func TestGetStatusBySectionsWithErrors(t *testing.T) { testCases := 
[]struct { name string format string + section string assertFunc func(*testing.T, []byte) }{ { - name: "JSON", - format: "json", + name: "JSON", + format: "json", + section: "error section", assertFunc: func(t *testing.T, bytes []byte) { result := map[string]interface{}{} err = json.Unmarshal(bytes, &result) assert.NoError(t, err) - assert.Equal(t, "testing JSON errors", result["errors"].([]interface{})[0].(string)) + assert.Equal(t, "JSON error", result["errors"].([]interface{})[0].(string)) }, }, { - name: "Text", - format: "text", + name: "Text", + format: "text", + section: "error section", assertFunc: func(t *testing.T, bytes []byte) { expected := `============= Error Section @@ -646,7 +724,7 @@ Error Section ==================== Status render errors ==================== - - testing Text errors + - Text error ` @@ -654,6 +732,49 @@ Status render errors expectedResult := strings.Replace(expected, "\r\n", "\n", -1) output := strings.Replace(string(bytes), "\r\n", "\n", -1) + assert.Equal(t, expectedResult, output) + }, + }, + { + name: "Header section JSON format", + format: "json", + section: "header", + assertFunc: func(t *testing.T, bytes []byte) { + result := map[string]interface{}{} + err = json.Unmarshal(bytes, &result) + + assert.NoError(t, err) + + assert.Equal(t, "JSON error", result["errors"].([]interface{})[0].(string)) + }, + }, + { + name: "Header section text format", + format: "text", + section: "header", + assertFunc: func(t *testing.T, bytes []byte) { + + expectedStatusTextErrorOutput := fmt.Sprintf(`%s + Status date: 2018-01-05 11:25:15 UTC (1515151515000) + Agent start: 2018-01-05 11:25:15 UTC (1515151515000) + Pid: %d + Go Version: %s + Python Version: n/a + Build arch: %s + Agent flavor: agent + Log Level: info + +==================== +Status render errors +==================== + - Text error + +`, testTextHeader, pid, goVersion, arch) + + // We replace windows line break by linux so the tests pass on every OS + expectedResult := 
strings.Replace(expectedStatusTextErrorOutput, "\r\n", "\n", -1) + output := strings.Replace(string(bytes), "\r\n", "\n", -1) + assert.Equal(t, expectedResult, output) }, }, @@ -661,7 +782,7 @@ Status render errors for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - bytesResult, err := statusComponent.GetStatusBySection("error section", testCase.format, false) + bytesResult, err := statusComponent.GetStatusBySection(testCase.section, testCase.format, false) assert.NoError(t, err) diff --git a/comp/logs/agent/agent.go b/comp/logs/agent/agent.go index f17e7464c4b99..0eac6b408408d 100644 --- a/comp/logs/agent/agent.go +++ b/comp/logs/agent/agent.go @@ -243,6 +243,10 @@ func (a *agent) AddScheduler(scheduler schedulers.Scheduler) { a.schedulers.AddScheduler(scheduler) } +func (a *agent) GetSources() *sources.LogSources { + return a.sources +} + func (a *agent) GetMessageReceiver() *diagnostic.BufferedMessageReceiver { return a.diagnosticMessageReceiver } diff --git a/comp/logs/agent/component.go b/comp/logs/agent/component.go index 37d2016ccf183..f1f3d15638fb1 100644 --- a/comp/logs/agent/component.go +++ b/comp/logs/agent/component.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -24,6 +25,9 @@ type Component interface { // AddScheduler adds an AD scheduler to the logs agent AddScheduler(scheduler schedulers.Scheduler) + // Get the logs sources + GetSources() *sources.LogSources + // GetMessageReceiver gets the diagnostic message receiver GetMessageReceiver() *diagnostic.BufferedMessageReceiver @@ -44,6 +48,8 @@ type ServerlessLogsAgent interface { // Mock implements mock-specific methods. 
type Mock interface { Component + + SetSources(sources *sources.LogSources) } // Module defines the fx options for this component. diff --git a/comp/logs/agent/config/integration_config.go b/comp/logs/agent/config/integration_config.go index d4c8db84b5d4a..52192fee3f01f 100644 --- a/comp/logs/agent/config/integration_config.go +++ b/comp/logs/agent/config/integration_config.go @@ -6,6 +6,7 @@ package config import ( + "encoding/json" "fmt" "strings" "sync" @@ -161,6 +162,39 @@ func (c *LogsConfig) Dump(multiline bool) string { return b.String() } +// PublicJSON serialize the structure to make sure we only export fields that can be relevant to customers. +// This is used to send the logs config to the backend as part of the metadata payload. +func (c *LogsConfig) PublicJSON() ([]byte, error) { + // Export only fields that are explicitly documented in the public documentation + return json.Marshal(&struct { + Type string `json:"type,omitempty"` + Port int `json:"port,omitempty"` // Network + Path string `json:"path,omitempty"` // File, Journald + Encoding string `json:"encoding,omitempty"` // File + ExcludePaths []string `json:"exclude_paths,omitempty"` // File + TailingMode string `json:"start_position,omitempty"` // File + ChannelPath string `json:"channel_path,omitempty"` // Windows Event + Service string `json:"service,omitempty"` + Source string `json:"source,omitempty"` + Tags []string `json:"tags,omitempty"` + ProcessingRules []*ProcessingRule `json:"log_processing_rules,omitempty"` + AutoMultiLine *bool `json:"auto_multi_line_detection,omitempty"` + }{ + Type: c.Type, + Port: c.Port, + Path: c.Path, + Encoding: c.Encoding, + ExcludePaths: c.ExcludePaths, + TailingMode: c.TailingMode, + ChannelPath: c.ChannelPath, + Service: c.Service, + Source: c.Source, + Tags: c.Tags, + ProcessingRules: c.ProcessingRules, + AutoMultiLine: c.AutoMultiLine, + }) +} + // TailingMode type type TailingMode uint8 diff --git a/comp/logs/agent/config/integration_config_test.go 
b/comp/logs/agent/config/integration_config_test.go index c24f597763318..7318c5a1113ab 100644 --- a/comp/logs/agent/config/integration_config_test.go +++ b/comp/logs/agent/config/integration_config_test.go @@ -91,3 +91,19 @@ func TestConfigDump(t *testing.T) { dump := config.Dump(true) assert.Contains(t, dump, `Path: "/var/log/foo.log",`) } + +func TestPublicJSON(t *testing.T) { + config := LogsConfig{ + Type: FileType, + Path: "/var/log/foo.log", + Encoding: "utf-8", + Service: "foo", + Tags: []string{"foo:bar"}, + Source: "bar", + } + ret, err := config.PublicJSON() + assert.NoError(t, err) + + expectedJSON := `{"type":"file","path":"/var/log/foo.log","encoding":"utf-8","service":"foo","source":"bar","tags":["foo:bar"]}` + assert.Equal(t, expectedJSON, string(ret)) +} diff --git a/comp/logs/agent/mock.go b/comp/logs/agent/mock.go index da21ba9a5bd12..7fed52e93c5d1 100644 --- a/comp/logs/agent/mock.go +++ b/comp/logs/agent/mock.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/optional" "go.uber.org/fx" ) @@ -21,6 +22,7 @@ type mockLogsAgent struct { addedSchedulers []schedulers.Scheduler hasFlushed bool flushDelay time.Duration + logSources *sources.LogSources } func newMock(deps dependencies) optional.Option[Mock] { @@ -51,6 +53,10 @@ func (a *mockLogsAgent) AddScheduler(scheduler schedulers.Scheduler) { a.addedSchedulers = append(a.addedSchedulers, scheduler) } +func (a *mockLogsAgent) SetSources(sources *sources.LogSources) { + a.logSources = sources +} + func (a *mockLogsAgent) IsRunning() bool { return a.isRunning } @@ -59,6 +65,10 @@ func (a *mockLogsAgent) GetMessageReceiver() *diagnostic.BufferedMessageReceiver return nil } +func (a *mockLogsAgent) GetSources() *sources.LogSources { + return a.logSources +} + // 
Serverless methods func (a *mockLogsAgent) Start() error { return a.start(context.TODO()) diff --git a/comp/metadata/bundle_test.go b/comp/metadata/bundle_test.go index 6db29988fbc9f..03bf8d13f1c9b 100644 --- a/comp/metadata/bundle_test.go +++ b/comp/metadata/bundle_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -24,6 +25,9 @@ func TestBundleDependencies(t *testing.T) { fx.Provide(func() optional.Option[collector.Collector] { return optional.NewOption[collector.Collector](collector.NewMock(nil)) }), + fx.Provide(func() optional.Option[agent.Component] { + return optional.NewNoneOption[agent.Component]() + }), ) } diff --git a/comp/metadata/inventoryagent/component.go b/comp/metadata/inventoryagent/component.go index 96510ef56f787..c93ebafbd737f 100644 --- a/comp/metadata/inventoryagent/component.go +++ b/comp/metadata/inventoryagent/component.go @@ -22,8 +22,6 @@ type Component interface { GetAsJSON() ([]byte, error) // Get returns a copy of the agent metadata. Useful to be incorporated in the status page. Get() map[string]interface{} - // Refresh trigger a new payload to be send while still respecting the minimal interval between two updates. - Refresh() } // Module defines the fx options for this component. 
diff --git a/comp/metadata/inventoryagent/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagent.go index 17e16d6de41ef..795cf43366d9a 100644 --- a/comp/metadata/inventoryagent/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagent.go @@ -95,6 +95,8 @@ func newInventoryAgentProvider(deps dependencies) provides { if ia.Enabled { ia.initData() + // We want to be notified when the configuration is updated + deps.Config.OnUpdate(func(_ string) { ia.Refresh() }) } return provides{ diff --git a/comp/metadata/inventoryagent/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagent_test.go index c0e92af3ff06b..18e3b6cce3cde 100644 --- a/comp/metadata/inventoryagent/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagent_test.go @@ -15,6 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -232,3 +233,11 @@ func TestFlareProviderFilename(t *testing.T) { ia := getTestInventoryPayload(t, nil) assert.Equal(t, "agent.json", ia.FlareFileName) } + +func TestConfigRefresh(t *testing.T) { + ia := getTestInventoryPayload(t, nil) + + assert.False(t, ia.ForceRefresh) + pkgconfig.Datadog.Set("inventories_max_interval", 10*time.Minute, pkgconfigmodel.SourceAgentRuntime) + assert.True(t, ia.ForceRefresh) +} diff --git a/comp/metadata/inventorychecks/README.md b/comp/metadata/inventorychecks/README.md index 27a60e316953f..88a8a5a947789 100644 --- a/comp/metadata/inventorychecks/README.md +++ b/comp/metadata/inventorychecks/README.md @@ -29,6 +29,13 @@ The payload is a JSON dict with the following fields - `init_config` - **string**: the `init_config` part of the configuration for this check 
instance. - `instance_config` - **string**: the YAML configuration for this check instance - Any other metadata registered by the instance (instance version, version of the software monitored, ...). +- `logs_metadata` - **dict of string to list**: dictionary with the log source names as keys; values are a list of the metadata + for each instance of that log source. + Each instance is composed of: + - `config` - **string**: the canonical JSON of the log source configuration. + - `state` - **dict of string**: the current state of the log source. + - `status` - **string**: one of `pending`, `error` or `success`. + - `error` - **string**: the error description if any. ("scrubbed" indicates that secrets are removed from the field value just as they are in logs) @@ -104,6 +111,32 @@ Here an example of an inventory payload: } ] }, + "logs_metadata": { + "redisdb": [ + { + "config": "{\"path\":\"/var/log/redis_6379.log\",\"service\":\"myredis2\",\"source\":\"redis\",\"type\":\"file\",\"tags\":[\"env:prod\"]}", + "service": "awesome_cache", + "source": "source1", + "state": { + "error": "Error: cannot read file /var/log/redis_6379.log: stat /var/log/redis_6379.log: no such file or directory", + "status": "error" + }, + "tags": ["env:prod"] + } + ], + "nginx": [ + { + "config": "{\"path\":\"/var/log/nginx/access.log\",\"service\":\"nginx\",\"source\":\"nginx\",\"type\":\"file\"}", + "service": "nginx", + "source": "source2", + "state": { + "error": "", + "status": "success" + }, + "tags": [] + } + ] + }, "hostname": "my-host", "timestamp": 1631281754507358895 } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go index fd1faf1339d03..56d3181739ed4 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go @@ -17,12 +17,14 @@ import (
"github.com/DataDog/datadog-agent/comp/core/config" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/comp/core/log" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/internal/util" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -43,9 +45,10 @@ type checksMetadata map[string][]metadata // Payload handles the JSON unmarshalling of the metadata payload type Payload struct { - Hostname string `json:"hostname"` - Timestamp int64 `json:"timestamp"` - Metadata map[string][]metadata `json:"check_metadata"` + Hostname string `json:"hostname"` + Timestamp int64 `json:"timestamp"` + Metadata map[string][]metadata `json:"check_metadata"` + LogsMetadata map[string][]metadata `json:"logs_metadata"` } // MarshalJSON serialization a Payload to JSON @@ -77,6 +80,7 @@ type inventorychecksImpl struct { log log.Component conf config.Component coll optional.Option[collector.Collector] + sources optional.Option[*sources.LogSources] hostname string } @@ -87,6 +91,7 @@ type dependencies struct { Config config.Component Serializer serializer.MetricSerializer Coll optional.Option[collector.Collector] + LogAgent optional.Option[logagent.Component] } type provides struct { @@ -103,6 +108,7 @@ func newInventoryChecksProvider(deps dependencies) provides { conf: deps.Config, log: deps.Log, coll: deps.Coll, + sources: optional.NewNoneOption[*sources.LogSources](), hostname: hname, data: map[string]instanceMetadata{}, } @@ 
-116,6 +122,10 @@ func newInventoryChecksProvider(deps dependencies) provides { coll.AddEventReceiver(func(_ checkid.ID, _ collector.EventType) { ic.Refresh() }) } + if logAgent, isSet := deps.LogAgent.Get(); isSet { + ic.sources.Set(logAgent.GetSources()) + } + return provides{ Comp: ic, Provider: ic.MetadataProvider(), @@ -197,9 +207,42 @@ func (ic *inventorychecksImpl) getPayload() marshaler.JSONMarshaler { } } + logsMetadata := make(map[string][]metadata) + if sources, isSet := ic.sources.Get(); isSet { + if sources != nil { + for _, logSource := range sources.GetSources() { + if _, found := logsMetadata[logSource.Name]; !found { + logsMetadata[logSource.Name] = []metadata{} + } + + parsedJSON, err := logSource.Config.PublicJSON() + if err != nil { + ic.log.Debugf("could not parse log configuration for source metadata %s: %v", logSource.Name, err) + continue + } + + tags := logSource.Config.Tags + if tags == nil { + tags = []string{} + } + logsMetadata[logSource.Name] = append(logsMetadata[logSource.Name], metadata{ + "config": string(parsedJSON), + "state": map[string]string{ + "error": logSource.Status.GetError(), + "status": logSource.Status.String(), + }, + "service": logSource.Config.Service, + "source": logSource.Config.Source, + "tags": tags, + }) + } + } + } + return &Payload{ - Hostname: ic.hostname, - Timestamp: time.Now().UnixNano(), - Metadata: payloadData, + Hostname: ic.hostname, + Timestamp: time.Now().UnixNano(), + Metadata: payloadData, + LogsMetadata: logsMetadata, } } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go index e00c66deec8de..7237c88e2797b 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go @@ -6,23 +6,29 @@ package inventorychecksimpl import ( + "fmt" "testing" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/mock" "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/log/logimpl" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" + logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" + "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" ) -func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collector], overrides map[string]any) *inventorychecksImpl { +func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collector], logAgent optional.Option[logagent.Component], overrides map[string]any) *inventorychecksImpl { p := newInventoryChecksProvider( fxutil.Test[dependencies]( t, @@ -33,13 +39,18 @@ func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Collect fx.Provide(func() optional.Option[collector.Collector] { return coll }), + fx.Provide(func() optional.Option[logagent.Component] { + return logAgent + }), ), ) return p.Comp.(*inventorychecksImpl) } func TestSet(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("instance_1", "key", "value") @@ -56,7 +67,9 @@ func TestSet(t *testing.T) { } func TestSetEmptyInstance(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, 
optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("", "key", "value") @@ -64,7 +77,9 @@ func TestSetEmptyInstance(t *testing.T) { } func TestGetInstanceMetadata(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) ic.Set("instance_1", "key1", "value1") ic.Set("instance_1", "key2", "value2") @@ -115,8 +130,28 @@ func TestGetPayload(t *testing.T) { mockColl.On("AddEventReceiver", mock.AnythingOfType("EventReceiver")).Return() mockColl.On("MapOverChecks", mock.AnythingOfType("func([]check.Info)")).Return() + // Setup log sources + logSources := sources.NewLogSources() + src := sources.NewLogSource("redisdb", &logConfig.LogsConfig{ + Type: logConfig.FileType, + Path: "/var/log/redis/redis.log", + Identifier: "redisdb", + Service: "awesome_cache", + Source: "redis", + Tags: []string{"env:prod"}, + }) + // Register an error + src.Status.Error(fmt.Errorf("No such file or directory")) + logSources.AddSource(src) + mockLogAgent := fxutil.Test[optional.Option[logagent.Mock]]( + t, logagent.MockModule(), core.MockBundle(), inventoryagent.MockModule(), + ) + logsAgent, _ := mockLogAgent.Get() + logsAgent.SetSources(logSources) + ic := getTestInventoryChecks(t, optional.NewOption[collector.Collector](mockColl), + optional.NewOption[logagent.Component](logsAgent), overrides, ) @@ -156,9 +191,27 @@ func TestGetPayload(t *testing.T) { // Check that metadata linked to non-existing check were deleted assert.NotContains(t, "non_running_checkid", ic.data) + + // Check the log sources part of the metadata + assert.Len(t, p.LogsMetadata, 1) + actualSource, found := p.LogsMetadata["redisdb"] + assert.True(t, found) + assert.Len(t, actualSource, 1) + expectedSourceConfig := 
`{"type":"file","path":"/var/log/redis/redis.log","service":"awesome_cache","source":"redis","tags":["env:prod"]}` + assert.Equal(t, expectedSourceConfig, actualSource[0]["config"]) + expectedSourceStatus := map[string]string{ + "status": "error", + "error": "Error: No such file or directory", + } + assert.Equal(t, expectedSourceStatus, actualSource[0]["state"]) + assert.Equal(t, "awesome_cache", actualSource[0]["service"]) + assert.Equal(t, "redis", actualSource[0]["source"]) + assert.Equal(t, []string{"env:prod"}, actualSource[0]["tags"]) } func TestFlareProviderFilename(t *testing.T) { - ic := getTestInventoryChecks(t, optional.NewNoneOption[collector.Collector](), nil) + ic := getTestInventoryChecks( + t, optional.NewNoneOption[collector.Collector](), optional.Option[logagent.Component]{}, nil, + ) assert.Equal(t, "checks.json", ic.FlareFileName) } diff --git a/comp/otelcol/otlp/config.go b/comp/otelcol/otlp/config.go index daaaa42b79d66..d420abea758fd 100644 --- a/comp/otelcol/otlp/config.go +++ b/comp/otelcol/otlp/config.go @@ -11,12 +11,12 @@ import ( "fmt" "strings" + "github.com/mohae/deepcopy" "go.opentelemetry.io/collector/confmap" "go.uber.org/multierr" "github.com/DataDog/datadog-agent/comp/core/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" - "github.com/mohae/deepcopy" + coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup" ) func portToUint(v int) (port uint, err error) { diff --git a/comp/process/apiserver/apiserver.go b/comp/process/apiserver/apiserver.go index ea28657e60cbf..09247498fafe4 100644 --- a/comp/process/apiserver/apiserver.go +++ b/comp/process/apiserver/apiserver.go @@ -88,7 +88,7 @@ func newApiServer(deps dependencies) Component { func initRuntimeSettings(logger log.Component) { // NOTE: Any settings you want to register should simply be added here processRuntimeSettings := []settings.RuntimeSetting{ - settings.NewLogLevelRuntimeSetting(nil), + settings.NewLogLevelRuntimeSetting(), 
settings.NewRuntimeMutexProfileFraction(), settings.NewRuntimeBlockProfileRate(), settings.NewProfilingGoroutines(), diff --git a/docs/cloud-workload-security/backend.md b/docs/cloud-workload-security/backend.md index 2408294824d25..2da537d3a897e 100644 --- a/docs/cloud-workload-security/backend.md +++ b/docs/cloud-workload-security/backend.md @@ -221,6 +221,10 @@ CSM Threats logs have the following JSON schema: }, "type": "array", "description": "The list of rules that the event matched (only valid in the context of an anomaly)" + }, + "origin": { + "type": "string", + "description": "Origin of the event" } }, "additionalProperties": false, @@ -1758,6 +1762,10 @@ CSM Threats logs have the following JSON schema: }, "type": "array", "description": "The list of rules that the event matched (only valid in the context of an anomaly)" + }, + "origin": { + "type": "string", + "description": "Origin of the event" } }, "additionalProperties": false, @@ -1774,6 +1782,7 @@ CSM Threats logs have the following JSON schema: | `outcome` | Event outcome | | `async` | True if the event was asynchronous | | `matched_rules` | The list of rules that the event matched (only valid in the context of an anomaly) | +| `origin` | Origin of the event | ## `ExitEvent` diff --git a/go.mod b/go.mod index 6b3a15b53df72..efcd6042e44ff 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,8 @@ replace ( ) replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ./cmd/agent/common/path/ + github.com/DataDog/datadog-agent/comp/core/config => ./comp/core/config/ github.com/DataDog/datadog-agent/comp/core/flare/types => ./comp/core/flare/types github.com/DataDog/datadog-agent/comp/core/secrets => ./comp/core/secrets github.com/DataDog/datadog-agent/comp/core/telemetry => ./comp/core/telemetry/ @@ -33,6 +35,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/logs => ./pkg/config/logs github.com/DataDog/datadog-agent/pkg/config/model => ./pkg/config/model/ 
github.com/DataDog/datadog-agent/pkg/config/remote => ./pkg/config/remote/ + github.com/DataDog/datadog-agent/pkg/config/setup => ./pkg/config/setup/ github.com/DataDog/datadog-agent/pkg/errors => ./pkg/errors github.com/DataDog/datadog-agent/pkg/gohai => ./pkg/gohai github.com/DataDog/datadog-agent/pkg/metrics => ./pkg/metrics/ @@ -57,6 +60,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/filesystem => ./pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ./pkg/util/fxutil/ github.com/DataDog/datadog-agent/pkg/util/grpc => ./pkg/util/grpc/ + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ./pkg/util/hostname/validate/ github.com/DataDog/datadog-agent/pkg/util/http => ./pkg/util/http/ github.com/DataDog/datadog-agent/pkg/util/json => ./pkg/util/json github.com/DataDog/datadog-agent/pkg/util/log => ./pkg/util/log @@ -89,7 +93,7 @@ require ( github.com/DataDog/datadog-go/v5 v5.4.0 // do not update datadog-operator to 1.2.1 because the indirect dependency github.com/DataDog/datadog-api-client-go/v2 v2.15.0 is trigger a huge Go heap memory increase. 
github.com/DataDog/datadog-operator v1.1.0 - github.com/DataDog/ebpf-manager v0.3.9 + github.com/DataDog/ebpf-manager v0.4.0 github.com/DataDog/gopsutil v1.2.2 github.com/DataDog/nikos v1.12.1 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.9.0 @@ -587,6 +591,8 @@ require github.com/lorenzosaino/go-sysctl v0.3.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/DataDog/agent-payload/v5 v5.0.100 + github.com/DataDog/datadog-agent/cmd/agent/common/path v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/config v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/flare/types v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/secrets v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/telemetry v0.50.0-rc.4 @@ -596,6 +602,7 @@ require ( github.com/DataDog/datadog-agent/pkg/config/logs v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/config/model v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/config/remote v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/config/setup v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/errors v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/metrics v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/networkdevice/profile v0.50.0-rc.4 @@ -613,6 +620,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/filesystem v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/util/grpc v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/util/http v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/util/json v0.50.0-rc.4 github.com/DataDog/datadog-agent/pkg/util/optional v0.50.0-rc.4 @@ -635,7 +643,7 @@ require ( github.com/jmoiron/sqlx v1.3.5 github.com/kr/pretty v0.3.1 github.com/protocolbuffers/protoscope 
v0.0.0-20221109213918-8e7a6aafa2c9 - github.com/sijms/go-ora/v2 v2.7.26 + github.com/sijms/go-ora/v2 v2.8.1 go.opentelemetry.io/collector/extension v0.91.0 go.opentelemetry.io/collector/otelcol v0.91.0 go.opentelemetry.io/collector/processor v0.91.0 @@ -649,7 +657,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.50.0-rc.4 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.50.0-rc.4 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.13.0 // indirect - github.com/DataDog/go-sqllexer v0.0.8 // indirect + github.com/DataDog/go-sqllexer v0.0.9 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect diff --git a/go.sum b/go.sum index 79a1584ff1e43..e8be398803771 100644 --- a/go.sum +++ b/go.sum @@ -134,16 +134,16 @@ github.com/DataDog/datadog-go/v5 v5.4.0 h1:Ea3eXUVwrVV28F/fo3Dr3aa+TL/Z7Xi6SUPKW github.com/DataDog/datadog-go/v5 v5.4.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/datadog-operator v1.1.0 h1:cSZqKarzM66GR0T1pPPZVopz4oPm3ltcRyqJ/h/6eJg= github.com/DataDog/datadog-operator v1.1.0/go.mod h1:3aWOjI4EaE1jYVN6llOXygA9nasy70GCa1XnTIWNoCY= -github.com/DataDog/ebpf-manager v0.3.9 h1:ysIyB7eGjjUSenxV43BoDBrauOwj+pzjlLwwi4Ly8t8= -github.com/DataDog/ebpf-manager v0.3.9/go.mod h1:GQ0AnVyn0Oi+NzBOBP1UB5kHGa5YZRxlFDhg4VGAo3s= +github.com/DataDog/ebpf-manager v0.4.0 h1:ZxOnHD9h4qrmGKEdt0TmSdDd0mRNJ+HfsG0ZH4wQaB0= +github.com/DataDog/ebpf-manager v0.4.0/go.mod h1:GQ0AnVyn0Oi+NzBOBP1UB5kHGa5YZRxlFDhg4VGAo3s= github.com/DataDog/extendeddaemonset v0.9.0-rc.2 h1:uTE/QEU0oYtHnebKSMbxap7XMG5603WQxNP/UX63E7k= github.com/DataDog/extendeddaemonset v0.9.0-rc.2/go.mod h1:JgKVGTsjdTdtJjNyxRZjcs81/rng6LJ3XX/0D7Y12Gc= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe h1:RO40ywnX/vZLi4Pb4jRuFGgQQBYGIIoQ6u+P2MIgFOA= 
github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf/v2 v2.2.2 h1:WS0l3qcPju2U4Ot+vr02f525YfW9RcoQfvpoV1410ac= github.com/DataDog/go-libddwaf/v2 v2.2.2/go.mod h1:UH7CLwSL++Ij9U7LmdZRH+71hzD+AfH28lF7pTTpWhs= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= -github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I= github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gopsutil v1.2.2 h1:8lmthwyyCXa1NKiYcHlrtl9AAFdfbNI2gPcioCJcBPU= @@ -1448,8 +1448,8 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sijms/go-ora/v2 v2.7.26 h1:+o/1ej7znA/Wpklv9eTiyp4Jqn2DU9Urw9YHrtPRP64= -github.com/sijms/go-ora/v2 v2.7.26/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= +github.com/sijms/go-ora/v2 v2.8.1 h1:nI7pIasv00Zn5IqINb4nPY8p74roCJr/LVZQ8hYDXt0= +github.com/sijms/go-ora/v2 v2.8.1/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= diff --git a/omnibus/config/projects/updater.rb 
b/omnibus/config/projects/updater.rb new file mode 100644 index 0000000000000..6fbcfac5e842b --- /dev/null +++ b/omnibus/config/projects/updater.rb @@ -0,0 +1,130 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https:#www.datadoghq.com/). +# Copyright 2016-present Datadog, Inc. +require "./lib/ostools.rb" + +name 'updater' +package_name 'datadog-updater' +license "Apache-2.0" +license_file "../LICENSE" + +third_party_licenses "../LICENSE-3rdparty.csv" + +homepage 'http://www.datadoghq.com' + +INSTALL_DIR = '/opt/datadog/updater' + +install_dir INSTALL_DIR + +if redhat_target? || suse_target? + maintainer 'Datadog, Inc ' + + # NOTE: with script dependencies, we only care about preinst/postinst/posttrans, + # because these would be used in a kickstart during package installation phase. + # All of the packages that we depend on in prerm/postrm scripts always have to be + # installed on all distros that we support, so we don't have to depend on them + # explicitly. + + # postinst and posttrans scripts use a subset of preinst script deps, so we don't + # have to list them, because they'll already be there because of preinst + runtime_script_dependency :pre, "coreutils" + runtime_script_dependency :pre, "findutils" + runtime_script_dependency :pre, "grep" + if redhat_target? + runtime_script_dependency :pre, "glibc-common" + runtime_script_dependency :pre, "shadow-utils" + else + runtime_script_dependency :pre, "glibc" + runtime_script_dependency :pre, "shadow" + end +else + maintainer 'Datadog Packages ' +end + +if debian_target? + runtime_recommended_dependency 'datadog-signing-keys (>= 1:1.3.1)' +end + +# build_version is computed by an invoke command/function. +# We can't call it directly from there, we pass it through the environment instead. 
+build_version ENV['PACKAGE_VERSION'] + +build_iteration 1 + +description 'Datadog Updater + The Datadog Updater is a lightweight process that updates the Datadog Agent + and Tracers. + + See http://www.datadoghq.com/ for more information +' + +# ------------------------------------ +# Generic package information +# ------------------------------------ + +# .deb specific flags +package :deb do + vendor 'Datadog ' + epoch 1 + license 'Apache License Version 2.0' + section 'utils' + priority 'extra' + if ENV.has_key?('DEB_SIGNING_PASSPHRASE') and not ENV['DEB_SIGNING_PASSPHRASE'].empty? + signing_passphrase "#{ENV['DEB_SIGNING_PASSPHRASE']}" + if ENV.has_key?('DEB_GPG_KEY_NAME') and not ENV['DEB_GPG_KEY_NAME'].empty? + gpg_key_name "#{ENV['DEB_GPG_KEY_NAME']}" + end + end +end + +# ------------------------------------ +# Dependencies +# ------------------------------------ + +# creates required build directories +dependency 'preparation' + +dependency 'updater' + +# version manifest file +dependency 'version-manifest' + +if linux_target? + systemd_directory = "/usr/lib/systemd/system" + if debian_target? 
+ systemd_directory = "/lib/systemd/system" + end + extra_package_file "#{systemd_directory}/datadog-updater.service" + extra_package_file "#{systemd_directory}/datadog-agent.service" + extra_package_file "#{systemd_directory}/datadog-agent-exp.service" + extra_package_file "#{systemd_directory}/datadog-agent-trace.service" + extra_package_file "#{systemd_directory}/datadog-agent-trace-exp.service" + extra_package_file "#{systemd_directory}/datadog-agent-process.service" + extra_package_file "#{systemd_directory}/datadog-agent-process-exp.service" + extra_package_file "#{systemd_directory}/datadog-agent-security.service" + extra_package_file "#{systemd_directory}/datadog-agent-security-exp.service" + extra_package_file "#{systemd_directory}/datadog-agent-sysprobe.service" + extra_package_file "#{systemd_directory}/datadog-agent-sysprobe-exp.service" + extra_package_file "#{systemd_directory}/start-experiment.path" + extra_package_file "#{systemd_directory}/stop-experiment.path" + extra_package_file '/etc/datadog-agent/' + extra_package_file '/var/log/datadog/' +end + +if linux_target? + if debian_target? + package_scripts_path "#{Omnibus::Config.project_root}/package-scripts/updater-deb" + end +end + +exclude '\.git*' +exclude 'bundler\/git' + +if linux_target? + # the stripper will drop the symbols in a `.debug` folder in the installdir + # we want to make sure that directory is not in the main build, while present + # in the debug package. + strip_build true + debug_path ".debug" # the strip symbols will be in here +end diff --git a/omnibus/config/software/updater.rb b/omnibus/config/software/updater.rb new file mode 100644 index 0000000000000..0918f2de9bcac --- /dev/null +++ b/omnibus/config/software/updater.rb @@ -0,0 +1,84 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the Apache License Version 2.0. +# This product includes software developed at Datadog (https:#www.datadoghq.com/). 
+# Copyright 2016-present Datadog, Inc. + +require './lib/ostools.rb' +require 'pathname' + +name 'updater' + +source path: '..' +relative_path 'src/github.com/DataDog/datadog-agent' + +build do + license :project_license + + # set GOPATH on the omnibus source dir for this software + gopath = Pathname.new(project_dir) + '../../../..' + etc_dir = "/etc/datadog-agent" + gomodcache = Pathname.new("/modcache") + env = { + 'GOPATH' => gopath.to_path, + 'PATH' => "#{gopath.to_path}/bin:#{ENV['PATH']}", + } + + unless ENV["OMNIBUS_GOMODCACHE"].nil? || ENV["OMNIBUS_GOMODCACHE"].empty? + gomodcache = Pathname.new(ENV["OMNIBUS_GOMODCACHE"]) + env["GOMODCACHE"] = gomodcache.to_path + end + + # include embedded path (mostly for `pkg-config` binary) + env = with_embedded_path(env) + + if linux_target? + command "invoke updater.build --rebuild", env: env + mkdir "#{install_dir}/bin" + mkdir "#{install_dir}/run/" + + + # Config + mkdir '/etc/datadog-agent' + mkdir "/etc/init" + mkdir "/var/log/datadog" + + move 'bin/agent/dist/datadog.yaml', '/etc/datadog-agent/datadog.yaml.example' + move 'bin/agent/dist/conf.d', '/etc/datadog-agent/' + copy 'bin/updater', "#{install_dir}/bin/" + + # Systemd + systemdPath = "/lib/systemd/system/" + if not debian_target? 
+ mkdir "/usr/lib/systemd/system/" + systemdPath = "/usr/lib/systemd/system/" + end + templateToFile = { + "datadog-agent.service.erb" => "datadog-agent.service", + "datadog-agent-exp.service.erb" => "datadog-agent-exp.service", + "datadog-agent-trace.service.erb" => "datadog-agent-trace.service", + "datadog-agent-trace-exp.service.erb" => "datadog-agent-trace-exp.service", + "datadog-agent-process.service.erb" => "datadog-agent-process.service", + "datadog-agent-process-exp.service.erb" => "datadog-agent-process-exp.service", + "datadog-agent-security.service.erb" => "datadog-agent-security.service", + "datadog-agent-security-exp.service.erb" => "datadog-agent-security-exp.service", + "datadog-agent-sysprobe.service.erb" => "datadog-agent-sysprobe.service", + "datadog-agent-sysprobe-exp.service.erb" => "datadog-agent-sysprobe-exp.service", + "start-experiment.path.erb" => "start-experiment.path", + "stop-experiment.path.erb" => "stop-experiment.path", + "datadog-updater.service.erb" => "datadog-updater.service", + } + templateToFile.each do |template, file| + erb source: template, + dest: systemdPath + file, + mode: 0644, + vars: { install_dir: install_dir, etc_dir: etc_dir } + end + + end + + # The file below is touched by software builds that don't put anything in the installation + # directory (libgcc right now) so that the git_cache gets updated let's remove it from the + # final package + delete "#{install_dir}/uselessfile" +end + diff --git a/omnibus/config/templates/updater/datadog-agent-exp.service.erb b/omnibus/config/templates/updater/datadog-agent-exp.service.erb new file mode 100644 index 0000000000000..6b521fd96b4eb --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-exp.service.erb @@ -0,0 +1,22 @@ +[Unit] +Description=Datadog Agent Experiment +After=network.target +OnFailure=datadog-agent.service +Conflicts=datadog-agent.service +Before=datadog-agent.service +JobTimeoutSec=3000 +Wants=datadog-agent-trace-exp.service 
datadog-agent-process-exp.service datadog-agent-sysprobe-exp.service datadog-agent-security-exp.service + +[Service] +Type=oneshot +PIDFile=<%= install_dir %>/run/agent.pid +User=dd-agent +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/agent run -p <%= install_dir %>/run/agent.pid +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/agent run -p <%= install_dir %>/run/agent.pid +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/agent run -p <%= install_dir %>/run/agent.pid +ExecStart=/bin/false +ExecStop=/bin/false + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-process-exp.service.erb b/omnibus/config/templates/updater/datadog-agent-process-exp.service.erb new file mode 100644 index 0000000000000..794c58dce4173 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-process-exp.service.erb @@ -0,0 +1,19 @@ +[Unit] +Description=Datadog Process Agent Experiment +After=network.target +BindsTo=datadog-agent-exp.service + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/process-agent.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/embedded/bin/process-agent --cfgpath=<%= etc_dir %>/datadog.yaml --sysprobe-config=<%= etc_dir %>/system-probe.yaml --pid=<%= install_dir %>/run/process-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-process.service.erb b/omnibus/config/templates/updater/datadog-agent-process.service.erb new file mode 100644 index 0000000000000..f2f09946de542 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-process.service.erb @@ -0,0 +1,19 @@ +[Unit] +Description=Datadog Process Agent +After=network.target datadog-agent.service +BindsTo=datadog-agent.service + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/process-agent.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/agent/embedded/bin/process-agent --cfgpath=<%= etc_dir %>/datadog.yaml --sysprobe-config=<%= etc_dir %>/system-probe.yaml --pid=<%= install_dir %>/run/process-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-security-exp.service.erb b/omnibus/config/templates/updater/datadog-agent-security-exp.service.erb new file mode 100644 index 0000000000000..527338e11a4a6 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-security-exp.service.erb @@ -0,0 +1,19 @@ +[Unit] +Description=Datadog Security Agent Experiment +After=network.target +BindsTo=datadog-agent-exp.service +ConditionPathExists=<%= etc_dir %>/security-agent.yaml + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/security-agent.pid +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/embedded/bin/security-agent -c <%= etc_dir %>/datadog.yaml --pidfile <%= install_dir %>/run/security-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-security.service.erb b/omnibus/config/templates/updater/datadog-agent-security.service.erb new file mode 100644 index 0000000000000..ecb0923ea1f73 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-security.service.erb @@ -0,0 +1,19 @@ +[Unit] +Description=Datadog Security Agent +After=network.target datadog-agent.service +BindsTo=datadog-agent.service +ConditionPathExists=<%= etc_dir %>/security-agent.yaml + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/security-agent.pid +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/agent/embedded/bin/security-agent -c <%= etc_dir %>/datadog.yaml --pidfile <%= install_dir %>/run/security-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. +StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-sysprobe-exp.service.erb b/omnibus/config/templates/updater/datadog-agent-sysprobe-exp.service.erb new file mode 100644 index 0000000000000..ee405f01e7ea9 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-sysprobe-exp.service.erb @@ -0,0 +1,20 @@ +[Unit] +Description=Datadog System Probe Experiment +Requires=sys-kernel-debug.mount +Before=datadog-agent.service +After=network.target sys-kernel-debug.mount +BindsTo=datadog-agent-exp.service +ConditionPathExists=<%= etc_dir %>/system-probe.yaml + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/system-probe.pid +Restart=on-failure +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/embedded/bin/system-probe run --config=<%= etc_dir %>/system-probe.yaml --pid=<%= install_dir %>/run/system-probe.pid +# Since systemd 229, should be in [Unit] but in order to support 
systemd <229, +# it is also supported to have it here. +StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-sysprobe.service.erb b/omnibus/config/templates/updater/datadog-agent-sysprobe.service.erb new file mode 100644 index 0000000000000..3c7b070dcc240 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-sysprobe.service.erb @@ -0,0 +1,20 @@ +[Unit] +Description=Datadog System Probe +Requires=sys-kernel-debug.mount +Before=datadog-agent.service +After=network.target sys-kernel-debug.mount +BindsTo=datadog-agent.service +ConditionPathExists=<%= etc_dir %>/system-probe.yaml + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/system-probe.pid +Restart=on-failure +ExecStart=<%= install_dir %>/agent_entrypoints/agent/embedded/bin/system-probe run --config=<%= etc_dir %>/system-probe.yaml --pid=<%= install_dir %>/run/system-probe.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-trace-exp.service.erb b/omnibus/config/templates/updater/datadog-agent-trace-exp.service.erb new file mode 100644 index 0000000000000..6f5c9b46fc8d6 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-trace-exp.service.erb @@ -0,0 +1,18 @@ +[Unit] +Description=Datadog Trace Agent Experiment +BindsTo=datadog-agent-exp.service + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/trace-agent.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/experiment_agent/embedded/bin/trace-agent --config <%= etc_dir %>/datadog.yaml --pidfile <%= install_dir %>/run/trace-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. +StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent-trace.service.erb b/omnibus/config/templates/updater/datadog-agent-trace.service.erb new file mode 100644 index 0000000000000..89464bc51c4b1 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent-trace.service.erb @@ -0,0 +1,19 @@ +[Unit] +Description=Datadog Trace Agent (APM) +After=datadog-agent.service +BindsTo=datadog-agent.service + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/trace-agent.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/agent/embedded/bin/trace-agent --config <%= etc_dir %>/datadog.yaml --pidfile <%= install_dir %>/run/trace-agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-agent.service.erb b/omnibus/config/templates/updater/datadog-agent.service.erb new file mode 100644 index 0000000000000..160058a2de5bc --- /dev/null +++ b/omnibus/config/templates/updater/datadog-agent.service.erb @@ -0,0 +1,21 @@ +[Unit] +Description=Datadog Agent +After=network.target +Wants=datadog-agent-trace.service datadog-agent-process.service datadog-agent-sysprobe.service datadog-agent-security.service +Conflicts=datadog-agent-trace-exp.service datadog-agent-process-exp.service datadog-agent-sysprobe-exp.service datadog-agent-security-exp.service +Before=datadog-agent-trace-exp.service datadog-agent-process-exp.service datadog-agent-sysprobe-exp.service datadog-agent-security-exp.service + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/agent.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/agent_entrypoints/agent/agent run -p <%= install_dir %>/run/agent.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. 
+StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/datadog-updater.service.erb b/omnibus/config/templates/updater/datadog-updater.service.erb new file mode 100644 index 0000000000000..1766bbfbd6190 --- /dev/null +++ b/omnibus/config/templates/updater/datadog-updater.service.erb @@ -0,0 +1,18 @@ +[Unit] +Description=Datadog Updater +After=network.target + +[Service] +Type=simple +PIDFile=<%= install_dir %>/run/updater.pid +User=dd-agent +Restart=on-failure +EnvironmentFile=-<%= etc_dir %>/environment +ExecStart=<%= install_dir %>/bin/updater/updater run -p <%= install_dir %>/run/updater.pid +# Since systemd 229, should be in [Unit] but in order to support systemd <229, +# it is also supported to have it here. +StartLimitInterval=10 +StartLimitBurst=5 + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/config/templates/updater/start-experiment.path.erb b/omnibus/config/templates/updater/start-experiment.path.erb new file mode 100644 index 0000000000000..bc74e73108360 --- /dev/null +++ b/omnibus/config/templates/updater/start-experiment.path.erb @@ -0,0 +1,10 @@ +[Unit] +Description="Monitor requests to start experiment" + +[Path] +PathModified=<%= install_dir %>/systemd_commands/start_experiment +Unit=datadog-agent-exp.service + +[Install] +WantedBy=multi-user.target + diff --git a/omnibus/config/templates/updater/stop-experiment.path.erb b/omnibus/config/templates/updater/stop-experiment.path.erb new file mode 100644 index 0000000000000..48254f8e55b37 --- /dev/null +++ b/omnibus/config/templates/updater/stop-experiment.path.erb @@ -0,0 +1,9 @@ +[Unit] +Description="Monitor requests to stop experiment" + +[Path] +PathModified=<%= install_dir %>/systemd_commands/stop_experiment +Unit=datadog-agent.service + +[Install] +WantedBy=multi-user.target diff --git a/omnibus/package-scripts/updater-deb/postinst b/omnibus/package-scripts/updater-deb/postinst new file mode 100644 index 
0000000000000..b3ba11bfa412c --- /dev/null +++ b/omnibus/package-scripts/updater-deb/postinst @@ -0,0 +1,112 @@ +#!/bin/sh +# +# Perform necessary datadog-updater setup steps after package is installed. +# +# .deb: STEP 5 of 5 + +INSTALL_DIR=/opt/datadog +LOG_DIR=/var/log/datadog +CONFIG_DIR=/etc/datadog-agent + +add_user_and_group() { + # Only create group and/or user if they don't already exist + NAME=$1 + HOME_DIR=$2 + getent group "$NAME" >/dev/null || (echo "Creating $NAME group" && addgroup --system "$NAME" --quiet) + set +e + id -u "$NAME" >/dev/null 2>&1 + USER_EXISTS=$? + set -e + if [ ! $USER_EXISTS -eq 0 ]; then + echo "Creating $NAME user" + adduser --system "$NAME" --disabled-login --shell /usr/sbin/nologin --home "$HOME_DIR" --no-create-home --group --quiet + elif id -nG "$NAME" | grep --invert-match --word-regexp --quiet "$NAME"; then + # User exists but is not part of the $NAME group + echo "Adding $NAME user to $NAME group" + usermod -g "$NAME" "$NAME" + fi +} + +enable_stable_agents() { + if command -v systemctl >/dev/null 2>&1; then + # Force systemd to ignore the sysvinit scripts. Only cosmetic, remove some irrelevant warnings during upgrade + SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-agent-process || true + SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-agent-sysprobe || true + SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-agent-trace || true + SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-agent-security || true + SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-agent || true + # experiment agents are not enabled as we don't systemctl enable them + fi + + +} + +set -e +case "$1" in + configure) + add_user_and_group 'dd-agent' $INSTALL_DIR/agents + add_user_and_group 'dd-updater' $INSTALL_DIR + usermod -g dd-agent dd-updater + ;; + abort-upgrade|abort-remove|abort-deconfigure) + ;; + *) + ;; +esac +#DEBHELPER# + +# Set the installation information if not already present; +# This is done in posttrans for .rpm packages +if [ ! 
-f "$CONFIG_DIR/install_info" ]; then + + if command -v dpkg >/dev/null 2>&1 && command -v dpkg-query >/dev/null 2>&1; then + tool=dpkg + tool_version=dpkg-$(dpkg-query --showformat='${Version}' --show dpkg | cut -d "." -f 1-3 || echo "unknown") + else + tool=unknown + tool_version=unknown + fi + + install_info_content="--- +install_method: + tool: $tool + tool_version: $tool_version + installer_version: deb_package + installer: updater +" + echo "$install_info_content" > $CONFIG_DIR/install_info +fi + +# Set proper rights to the dd-agent user +chown -R dd-agent:dd-agent ${CONFIG_DIR} +chown -R dd-agent:dd-agent ${LOG_DIR} +chown -R dd-updater:dd-updater ${INSTALL_DIR} + +chmod -R 755 ${INSTALL_DIR} + +# Make system-probe configs read-only +chmod 0440 ${CONFIG_DIR}/system-probe.yaml.example || true +if [ -f "$CONFIG_DIR/system-probe.yaml" ]; then + chmod 0440 ${CONFIG_DIR}/system-probe.yaml || true +fi + +# Make security-agent config read-only +chmod 0440 ${CONFIG_DIR}/security-agent.yaml.example || true +if [ -f "$CONFIG_DIR/security-agent.yaml" ]; then + chmod 0440 ${CONFIG_DIR}/security-agent.yaml || true +fi + +if [ -d "$CONFIG_DIR/compliance.d" ]; then + chown -R root:root ${CONFIG_DIR}/compliance.d || true +fi + +if [ -d "$CONFIG_DIR/runtime-security.d" ]; then + chown -R root:root ${CONFIG_DIR}/runtime-security.d || true +fi + +# start udpater +SYSTEMCTL_SKIP_SYSV=true systemctl enable datadog-updater || true +SYSTEMCTL_SKIP_SYSV=true systemctl start datadog-updater || true +enable_stable_agents + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/postinst-dbg b/omnibus/package-scripts/updater-deb/postinst-dbg new file mode 100644 index 0000000000000..482c1527f0218 --- /dev/null +++ b/omnibus/package-scripts/updater-deb/postinst-dbg @@ -0,0 +1,9 @@ +#!/bin/sh +# +# Perform necessary datadog-updater setup steps after package is installed. 
+# +# .deb: STEP 2 of 5 + +## NOTHING HERE + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/postrm b/omnibus/package-scripts/updater-deb/postrm new file mode 100644 index 0000000000000..0e4487d2e0c4c --- /dev/null +++ b/omnibus/package-scripts/updater-deb/postrm @@ -0,0 +1,35 @@ +#!/bin/sh +# +# Perform necessary datadog-updater removal steps after package is uninstalled. +# +# .deb: STEP 3 of 5 + +INSTALL_DIR=/opt/datadog +LOG_DIR=/var/log/datadog +CONFIG_DIR=/etc/datadog-agent + +set -e + +case "$1" in + purge) + echo "Deleting dd-agent user" + deluser dd-agent --quiet + echo "Deleting dd-updater user" + deluser dd-updater --quiet + echo "Deleting dd-agent group" + (getent group dd-agent >/dev/null && delgroup dd-agent --quiet) || true + echo "Deleting dd-updater group" + (getent group dd-updater >/dev/null && delgroup dd-updater --quiet) || true + echo "Force-deleting $INSTALL_DIR" + rm -rf $INSTALL_DIR + rm -rf $LOG_DIR + rm -rf $CONFIG_DIR + ;; + remove) + rm "$CONFIG_DIR/install_info" || true + ;; + *) + ;; +esac + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/postrm-dbg b/omnibus/package-scripts/updater-deb/postrm-dbg new file mode 100644 index 0000000000000..a7c9d14bd8740 --- /dev/null +++ b/omnibus/package-scripts/updater-deb/postrm-dbg @@ -0,0 +1,9 @@ +#!/bin/sh +# +# Perform necessary datadog-updater removal steps after package is uninstalled. 
+# +# .deb: STEP 3 of 5 + +## NOTHING HERE + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/preinst b/omnibus/package-scripts/updater-deb/preinst new file mode 100644 index 0000000000000..2bede251c2dac --- /dev/null +++ b/omnibus/package-scripts/updater-deb/preinst @@ -0,0 +1,9 @@ +#!/bin/sh +# +# +# .deb: STEP 2 of 5 + + +SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-updater || true + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/preinst-dbg b/omnibus/package-scripts/updater-deb/preinst-dbg new file mode 100644 index 0000000000000..c1fef31a31b0e --- /dev/null +++ b/omnibus/package-scripts/updater-deb/preinst-dbg @@ -0,0 +1,9 @@ +#!/bin/sh +# +# Perform necessary datadog-updater setup steps before package is installed. +# +# .deb: STEP 2 of 5 + +## NOTHING HERE + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/prerm b/omnibus/package-scripts/updater-deb/prerm new file mode 100644 index 0000000000000..1b1766ef0b4af --- /dev/null +++ b/omnibus/package-scripts/updater-deb/prerm @@ -0,0 +1,53 @@ +#!/bin/sh +# +# +# .deb: STEP 1 of 5 + + +stop_agents() +{ + if command -v systemctl >/dev/null 2>&1; then + # Force systemd to ignore the sysvinit scripts. 
Only cosmetic, remove some irrelevant warnings during upgrade + + # starting with experiment agents to avoid retriggering agent + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-process-exp || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-sysprobe-exp || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-trace-exp || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-security-exp || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-exp || true + + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-process || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-sysprobe || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-trace || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent-security || true + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-agent || true + fi +} + +deregister_agents() +{ + if command -v systemctl >/dev/null 2>&1; then + # Force systemd to ignore the sysvinit scripts. Only cosmetic, remove some irrelevant warnings during upgrade + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-agent-process || true + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-agent-sysprobe || true + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-agent-trace || true + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-agent-security || true + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-agent || true + + # experiment agents are not disabled as we don't systemctl enable them + fi +} +case "$1" in + remove) + stop_agents + deregister_agents + ;; + upgrade) + SYSTEMCTL_SKIP_SYSV=true systemctl stop datadog-updater || true + SYSTEMCTL_SKIP_SYSV=true systemctl disable datadog-updater || true + ;; + *) + ;; +esac + +exit 0 diff --git a/omnibus/package-scripts/updater-deb/prerm-dbg b/omnibus/package-scripts/updater-deb/prerm-dbg new file mode 100644 index 0000000000000..9f8a3e17505fa --- /dev/null +++ b/omnibus/package-scripts/updater-deb/prerm-dbg @@ -0,0 +1,10 @@ +#!/bin/sh +# 
+# Perform necessary datadog-updater setup steps prior to remove the old package. +# +# .deb: STEP 1 of 5 + +## NOTHING HERE + +exit 0 +~ diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 2b1857a98aea2..06dfed236706d 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -43,6 +43,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" + logagent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" @@ -154,6 +155,10 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() optional.Option[collector.Collector] { return optional.NewNoneOption[collector.Collector]() }), + fx.Provide(func() optional.Option[logagent.Component] { + return optional.NewNoneOption[logagent.Component]() + + }), fx.Provide(func() serializer.MetricSerializer { return nil }), fx.Supply(defaultforwarder.Params{UseNoopForwarder: true}), demultiplexerimpl.Module(), diff --git a/pkg/clusteragent/admission/start.go b/pkg/clusteragent/admission/start.go index 99f0ab3caca64..7a1075f371d25 100644 --- a/pkg/clusteragent/admission/start.go +++ b/pkg/clusteragent/admission/start.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" - "k8s.io/client-go/discovery" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -31,7 +30,6 @@ type ControllerContext struct { SecretInformers informers.SharedInformerFactory WebhookInformers informers.SharedInformerFactory Client kubernetes.Interface - 
DiscoveryClient discovery.DiscoveryInterface StopCh chan struct{} } @@ -58,12 +56,12 @@ func StartControllers(ctx ControllerContext) error { secretConfig, ) - nsSelectorEnabled, err := useNamespaceSelector(ctx.DiscoveryClient) + nsSelectorEnabled, err := useNamespaceSelector(ctx.Client.Discovery()) if err != nil { return err } - v1Enabled, err := UseAdmissionV1(ctx.DiscoveryClient) + v1Enabled, err := UseAdmissionV1(ctx.Client.Discovery()) if err != nil { return err } diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index 939e4cb26cf39..49acf34dc4093 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -30,12 +30,13 @@ const ( // dispatcher holds the management logic for cluster-checks type dispatcher struct { - store *clusterStore - nodeExpirationSeconds int64 - extraTags []string - clcRunnersClient clusteragent.CLCRunnerClientInterface - advancedDispatching bool - excludedChecks map[string]struct{} + store *clusterStore + nodeExpirationSeconds int64 + extraTags []string + clcRunnersClient clusteragent.CLCRunnerClientInterface + advancedDispatching bool + excludedChecks map[string]struct{} + excludedChecksFromDispatching map[string]struct{} } func newDispatcher() *dispatcher { @@ -54,6 +55,15 @@ func newDispatcher() *dispatcher { } } + excludedChecksFromDispatching := config.Datadog.GetStringSlice("cluster_checks.exclude_checks_from_dispatching") + // This option will almost always be empty + if len(excludedChecksFromDispatching) > 0 { + d.excludedChecksFromDispatching = make(map[string]struct{}, len(excludedChecksFromDispatching)) + for _, checkName := range excludedChecksFromDispatching { + d.excludedChecksFromDispatching[checkName] = struct{}{} + } + } + hname, _ := hostname.Get(context.TODO()) clusterTagValue := clustername.GetClusterName(context.TODO(), hname) clusterTagName := 
config.Datadog.GetString("cluster_checks.cluster_tag_name") diff --git a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go index ae65e9c6aca45..f4ebda60a807d 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go @@ -216,15 +216,25 @@ func (d *dispatcher) updateRunnersStats() { continue } node.Lock() - for id, checkStats := range stats { + for idStr, checkStats := range stats { + id := checkid.ID(idStr) + // Stats contain info about all the running checks on a node // Node checks must be filtered from Cluster Checks // so they can be included in calculating node Agent busyness and excluded from rebalancing decisions. - if _, found := d.store.idToDigest[checkid.ID(id)]; found { + if _, found := d.store.idToDigest[id]; found { // Cluster check detected (exists in the Cluster Agent checks store) log.Tracef("Check %s running on node %s is a cluster check", id, node.name) checkStats.IsClusterCheck = true - stats[id] = checkStats + stats[idStr] = checkStats + } + + checkName := checkid.IDToCheckName(id) + if _, found := d.excludedChecksFromDispatching[checkName]; found { + // TODO: We are abusing the IsClusterCheck field to mark checks that should be excluded from rebalancing decisions. + // It behaves the same way as we want to count them in rebalance decisions but we don't want to move them. 
+ checkStats.IsClusterCheck = false + stats[idStr] = checkStats } } node.clcRunnerStats = stats diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go index d1e28bce5fe83..be81bff42fc57 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go @@ -364,15 +364,12 @@ func (d *dispatcher) currentDistribution() checksDistribution { for nodeName, nodeStoreInfo := range d.store.nodes { for checkID, stats := range nodeStoreInfo.clcRunnerStats { - digest, found := d.store.idToDigest[checkid.ID(checkID)] - if !found { // Not a cluster check + if !stats.IsClusterCheck { continue } minCollectionInterval := defaults.DefaultCheckInterval - - conf := d.store.digestToConfig[digest] - + conf := d.store.digestToConfig[d.store.idToDigest[checkid.ID(checkID)]] if len(conf.Instances) > 0 { commonOptions := integration.CommonInstanceConfig{} err := yaml.Unmarshal(conf.Instances[0], &commonOptions) diff --git a/pkg/clusteragent/externalmetrics/provider.go b/pkg/clusteragent/externalmetrics/provider.go index 9983526a4aa0a..81acfa41204ae 100644 --- a/pkg/clusteragent/externalmetrics/provider.go +++ b/pkg/clusteragent/externalmetrics/provider.go @@ -16,6 +16,7 @@ import ( apierr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/metrics/pkg/apis/external_metrics" "sigs.k8s.io/custom-metrics-apiserver/pkg/provider" @@ -61,6 +62,7 @@ func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient) ( splitBatchBackoffOnErrors := config.Datadog.GetBool("external_metrics_provider.split_batches_with_backoff") autogenNamespace := common.GetResourcesNamespace() autogenEnabled := config.Datadog.GetBool("external_metrics_provider.enable_datadogmetric_autogen") + wpaEnabled := config.Datadog.GetBool("external_metrics_provider.wpa_controller") provider := &datadogMetricProvider{ 
apiCl: apiCl, @@ -80,6 +82,11 @@ func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient) ( } go metricsRetriever.Run(ctx.Done()) + var wpaInformer dynamicinformer.DynamicSharedInformerFactory + if wpaEnabled { + wpaInformer = apiCl.DynamicInformerFactory + } + // Start AutoscalerWatcher, only leader will flag DatadogMetrics as Active/Inactive // WPAInformerFactory is nil when WPA is not used. AutoscalerWatcher will check value itself. autoscalerWatcher, err := NewAutoscalerWatcher( @@ -89,27 +96,25 @@ func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient) ( autogenNamespace, apiCl.Cl, apiCl.InformerFactory, - apiCl.WPAInformerFactory, + wpaInformer, le.IsLeader, &provider.store, ) if err != nil { return nil, fmt.Errorf("Unabled to create DatadogMetricProvider as AutoscalerWatcher failed with: %v", err) } - apiCl.InformerFactory.Start(ctx.Done()) - if apiCl.WPAInformerFactory != nil { - apiCl.WPAInformerFactory.Start(ctx.Done()) - } - go autoscalerWatcher.Run(ctx.Done()) // We shift controller refresh period from retrieverRefreshPeriod to maximize the probability to have new data from DD - controller, err := NewDatadogMetricController(apiCl.DDClient, apiCl.DynamicInformerFactory, le.IsLeader, &provider.store) + controller, err := NewDatadogMetricController(apiCl.DynamicCl, apiCl.DynamicInformerFactory, le.IsLeader, &provider.store) if err != nil { return nil, fmt.Errorf("Unable to create DatadogMetricProvider as DatadogMetric Controller failed with: %v", err) } // Start informers & controllers (informers can be started multiple times) apiCl.DynamicInformerFactory.Start(ctx.Done()) + apiCl.InformerFactory.Start(ctx.Done()) + + go autoscalerWatcher.Run(ctx.Done()) go controller.Run(ctx) return provider, nil diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index f48da965d9c3f..3102947962c41 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -28,8 +28,6 @@ const ( started ) 
-const cancelCheckTimeout time.Duration = 500 * time.Millisecond - // EventType represents the type of events emitted by the collector type EventType uint32 @@ -77,16 +75,19 @@ type collector struct { checks map[checkid.ID]*middleware.CheckWrapper eventReceivers []EventReceiver + cancelCheckTimeout time.Duration + m sync.RWMutex } // NewCollector create a Collector instance and sets up the Python Environment -func NewCollector(senderManager sender.SenderManager, paths ...string) Collector { +func NewCollector(senderManager sender.SenderManager, cancelCheckTimeout time.Duration, paths ...string) Collector { c := &collector{ - senderManager: senderManager, - checks: make(map[checkid.ID]*middleware.CheckWrapper), - state: atomic.NewUint32(stopped), - checkInstances: int64(0), + senderManager: senderManager, + checks: make(map[checkid.ID]*middleware.CheckWrapper), + state: atomic.NewUint32(stopped), + checkInstances: int64(0), + cancelCheckTimeout: cancelCheckTimeout, } pyVer, pyHome, pyPath := pySetup(paths...) 
@@ -220,11 +221,11 @@ func (c *collector) StopCheck(id checkid.ID) error { err = c.runner.StopCheck(id) if err != nil { // still attempt to cancel the check before returning the error - _ = c.cancelCheck(ch, cancelCheckTimeout) + _ = c.cancelCheck(ch, c.cancelCheckTimeout) return fmt.Errorf("an error occurred while stopping the check: %s", err) } - err = c.cancelCheck(ch, cancelCheckTimeout) + err = c.cancelCheck(ch, c.cancelCheckTimeout) if err != nil { return fmt.Errorf("an error occurred while calling check.Cancel(): %s", err) } @@ -251,7 +252,7 @@ func (c *collector) cancelCheck(ch check.Check, timeout time.Duration) error { case <-done: return nil case <-time.After(timeout): - return fmt.Errorf("timeout while calling check.Cancel() on check ID %s", ch.ID()) + return fmt.Errorf("timeout while calling check.Cancel() on check ID %s, timeout: %s", ch.ID(), timeout) } } diff --git a/pkg/collector/collector_demux_test.go b/pkg/collector/collector_demux_test.go index e49a6e8bb9e86..7ea01c030560d 100644 --- a/pkg/collector/collector_demux_test.go +++ b/pkg/collector/collector_demux_test.go @@ -33,7 +33,7 @@ type CollectorDemuxTestSuite struct { func (suite *CollectorDemuxTestSuite) SetupTest() { log := fxutil.Test[log.Component](suite.T(), logimpl.MockModule()) suite.demux = aggregator.InitTestAgentDemultiplexerWithFlushInterval(log, 100*time.Hour) - suite.c = NewCollector(suite.demux).(*collector) + suite.c = NewCollector(suite.demux, 500*time.Millisecond).(*collector) suite.c.Start() } diff --git a/pkg/collector/collector_test.go b/pkg/collector/collector_test.go index 4cc63e5b30dcb..ea0c0d8addf44 100644 --- a/pkg/collector/collector_test.go +++ b/pkg/collector/collector_test.go @@ -41,6 +41,7 @@ func (c *TestCheck) ID() checkid.ID { } return checkid.ID(c.String()) } + func (c *TestCheck) String() string { if c.name != "" { return c.name @@ -84,7 +85,7 @@ type CollectorTestSuite struct { } func (suite *CollectorTestSuite) SetupTest() { - suite.c = 
NewCollector(aggregator.NewNoOpSenderManager()).(*collector) + suite.c = NewCollector(aggregator.NewNoOpSenderManager(), 500*time.Millisecond).(*collector) suite.c.Start() } diff --git a/pkg/collector/corechecks/cluster/helm/helm.go b/pkg/collector/corechecks/cluster/helm/helm.go index 13f69102823a7..d28ff7d7d5c35 100644 --- a/pkg/collector/corechecks/cluster/helm/helm.go +++ b/pkg/collector/corechecks/cluster/helm/helm.go @@ -31,6 +31,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) const ( @@ -119,7 +120,6 @@ func (hc *HelmCheck) Configure(senderManager sender.SenderManager, integrationCo hc.setSharedInformerFactory(apiClient) hc.startTS = time.Now() - hc.informersStopCh = make(chan struct{}) return nil @@ -205,13 +205,11 @@ func (hc *HelmCheck) setupInformers() error { } func (hc *HelmCheck) setSharedInformerFactory(apiClient *apiserver.APIClient) { - hc.informerFactory = informers.NewSharedInformerFactoryWithOptions( - apiClient.Cl, - hc.getInformersResyncPeriod(), + hc.informerFactory = apiClient.GetInformerWithOptions( + pointer.Ptr(hc.getInformersResyncPeriod()), informers.WithTweakListOptions(func(opts *metav1.ListOptions) { opts.LabelSelector = labelSelector - }), - ) + })) } func (hc *HelmCheck) allTags(release *release, storageDriver helmStorage, includeRevision bool) []string { diff --git a/pkg/collector/corechecks/cluster/ksm/customresources/apiservice.go b/pkg/collector/corechecks/cluster/ksm/customresources/apiservice.go index 2a53c81c3d772..36b7600ab94ed 100644 --- a/pkg/collector/corechecks/cluster/ksm/customresources/apiservice.go +++ b/pkg/collector/corechecks/cluster/ksm/customresources/apiservice.go @@ -35,7 +35,7 @@ var ( // NewAPIServiceFactory returns a new APIService metric family generator factory. 
func NewAPIServiceFactory(client *apiserver.APIClient) customresource.RegistryFactory { return &apiserviceFactory{ - client: client.APISClient, + client: client.APISInformerClient, } } diff --git a/pkg/collector/corechecks/cluster/ksm/customresources/crd.go b/pkg/collector/corechecks/cluster/ksm/customresources/crd.go index 31e32057da624..a3b3d5ac24828 100644 --- a/pkg/collector/corechecks/cluster/ksm/customresources/crd.go +++ b/pkg/collector/corechecks/cluster/ksm/customresources/crd.go @@ -40,7 +40,7 @@ var ( // metric family generator factory. func NewCustomResourceDefinitionFactory(client *apiserver.APIClient) customresource.RegistryFactory { return &crdFactory{ - client: client.CRDClient, + client: client.CRDInformerClient, } } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go index 6616eade06157..e5d27b6beaceb 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/ksm/customresources" "github.com/DataDog/datadog-agent/pkg/config" + //nolint:revive // TODO(CINT) Fix revive linter ddconfig "github.com/DataDog/datadog-agent/pkg/config" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" @@ -204,12 +205,6 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf k.BuildID(integrationConfigDigest, config, initConfig) k.agentConfig = ddconfig.Datadog - // Retrieve cluster name - k.getClusterName() - - // Initialize global tags and check tags - k.initTags() - err := k.CommonConfigure(senderManager, integrationConfigDigest, initConfig, config, source) if err != nil { return err @@ -220,6 +215,12 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf return err } + // Retrieve cluster 
name + k.getClusterName() + + // Initialize global tags and check tags + k.initTags() + // Prepare label joins for _, joinConf := range k.instance.LabelJoins { joinConf.setupGetAllLabels() @@ -249,7 +250,7 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf } // Discover resources that are currently available - resources, err := discoverResources(c.DiscoveryCl) + resources, err := discoverResources(c.Cl.Discovery()) if err != nil { return err } @@ -295,9 +296,9 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf builder.WithFamilyGeneratorFilter(allowDenyList) - builder.WithKubeClient(c.Cl) + builder.WithKubeClient(c.InformerCl) - builder.WithVPAClient(c.VPAClient) + builder.WithVPAClient(c.VPAInformerClient) ctx, cancel := context.WithCancel(context.Background()) k.cancel = cancel @@ -418,11 +419,6 @@ func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors [] } func manageResourcesReplacement(c *apiserver.APIClient, factories []customresource.RegistryFactory, resources []*v1.APIResourceList) []customresource.RegistryFactory { - if c.DiscoveryCl == nil { - log.Warn("Kubernetes discovery client has not been properly initialized") - return factories - } - // backwards/forwards compatibility resource factories are only // registered if they're needed, otherwise they'd overwrite the default // ones that ship with ksm @@ -472,6 +468,15 @@ func (k *KSMCheck) Run() error { return err } + // Normally the sender is kept for the lifetime of the check. + // But as `SetCheckCustomTags` is cheap and `k.instance.Tags` is immutable + // It's fast and safe to set it after we get the sender. 
+ sender.SetCheckCustomTags(k.instance.Tags) + + // Do not fallback to the Agent hostname if the hostname corresponding to the KSM metric is unknown + // Note that by design, some metrics cannot have hostnames (e.g kubernetes_state.pod.unschedulable) + sender.DisableDefaultHostname(true) + // If the check is configured as a cluster check, the cluster check worker needs to skip the leader election section. // we also do a safety check for dedicated runners to avoid trying the leader election if !k.isCLCRunner || !k.instance.LeaderSkip { @@ -496,10 +501,6 @@ func (k *KSMCheck) Run() error { defer sender.Commit() - // Do not fallback to the Agent hostname if the hostname corresponding to the KSM metric is unknown - // Note that by design, some metrics cannot have hostnames (e.g kubernetes_state.pod.unschedulable) - sender.DisableDefaultHostname(true) - labelJoiner := newLabelJoiner(k.instance.labelJoins) for _, stores := range k.allStores { for _, store := range stores { @@ -778,10 +779,6 @@ func (k *KSMCheck) getClusterName() { // Sets the kube_cluster_name tag for all metrics. // Adds the global user-defined tags from the Agent config. func (k *KSMCheck) initTags() { - if k.instance.Tags == nil { - k.instance.Tags = []string{} - } - if k.clusterNameTagValue != "" { k.instance.Tags = append(k.instance.Tags, "kube_cluster_name:"+k.clusterNameTagValue) } diff --git a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go index 7f38008eaf078..c9d1322623cda 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go +++ b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go @@ -45,7 +45,6 @@ func NewAPIServerDiscoveryProvider() *APIServerDiscoveryProvider { // Discover returns collectors to enable based on information exposed by the API server. 
func (p *APIServerDiscoveryProvider) Discover(inventory *inventory.CollectorInventory) ([]collectors.Collector, error) { groups, resources, err := GetServerGroupsAndResources() - if err != nil { return nil, err } @@ -75,7 +74,7 @@ func GetServerGroupsAndResources() ([]*v1.APIGroup, []*v1.APIResourceList, error return nil, nil, err } - groups, resources, err := client.DiscoveryCl.ServerGroupsAndResources() + groups, resources, err := client.Cl.Discovery().ServerGroupsAndResources() if err != nil { if !discovery.IsGroupDiscoveryFailedError(err) { return nil, nil, err diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go index 88fe88d4893a5..a5a82dab471bb 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go +++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go @@ -203,16 +203,17 @@ func (o *OrchestratorCheck) Cancel() { } func getOrchestratorInformerFactory(apiClient *apiserver.APIClient) *collectors.OrchestratorInformerFactory { - of := &collectors.OrchestratorInformerFactory{ - InformerFactory: informers.NewSharedInformerFactory(apiClient.Cl, defaultResyncInterval), - CRDInformerFactory: externalversions.NewSharedInformerFactory(apiClient.CRDClient, defaultResyncInterval), - DynamicInformerFactory: dynamicinformer.NewDynamicSharedInformerFactory(apiClient.DynamicCl, defaultResyncInterval), - VPAInformerFactory: vpai.NewSharedInformerFactory(apiClient.VPAClient, defaultResyncInterval), - } - tweakListOptions := func(options *metav1.ListOptions) { options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", "").String() } - of.UnassignedPodInformerFactory = informers.NewSharedInformerFactoryWithOptions(apiClient.Cl, defaultResyncInterval, informers.WithTweakListOptions(tweakListOptions)) + + of := &collectors.OrchestratorInformerFactory{ + InformerFactory: informers.NewSharedInformerFactoryWithOptions(apiClient.InformerCl, 
defaultResyncInterval), + CRDInformerFactory: externalversions.NewSharedInformerFactory(apiClient.CRDInformerClient, defaultResyncInterval), + DynamicInformerFactory: dynamicinformer.NewDynamicSharedInformerFactory(apiClient.DynamicInformerCl, defaultResyncInterval), + VPAInformerFactory: vpai.NewSharedInformerFactory(apiClient.VPAInformerClient, defaultResyncInterval), + UnassignedPodInformerFactory: informers.NewSharedInformerFactoryWithOptions(apiClient.InformerCl, defaultResyncInterval, informers.WithTweakListOptions(tweakListOptions)), + } + return of } diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go index 789c800a63fa8..0d1610dc3c8ec 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator_test.go @@ -55,7 +55,7 @@ func TestOrchestratorCheckSafeReSchedule(t *testing.T) { client := fake.NewSimpleClientset() vpaClient := vpa.NewSimpleClientset() crdClient := crd.NewSimpleClientset() - cl := &apiserver.APIClient{Cl: client, VPAClient: vpaClient, CRDClient: crdClient} + cl := &apiserver.APIClient{InformerCl: client, VPAInformerClient: vpaClient, CRDInformerClient: crdClient} orchCheck := OrchestratorFactory().(*OrchestratorCheck) orchCheck.apiClient = cl @@ -85,7 +85,6 @@ func TestOrchestratorCheckSafeReSchedule(t *testing.T) { writeNode(t, client, "2") assert.True(t, waitTimeout(&wg, 2*time.Second)) - } func writeNode(t *testing.T, client *fake.Clientset, version string) { diff --git a/pkg/collector/corechecks/oracle-dbm/config/config.go b/pkg/collector/corechecks/oracle-dbm/config/config.go index 2eedb6dc36829..e58c648d6c0a0 100644 --- a/pkg/collector/corechecks/oracle-dbm/config/config.go +++ b/pkg/collector/corechecks/oracle-dbm/config/config.go @@ -167,6 +167,7 @@ func GetDefaultObfuscatorOptions() obfuscate.SQLConfig { ObfuscationMode: obfuscate.ObfuscateAndNormalize, 
RemoveSpaceBetweenParentheses: true, KeepNull: true, + KeepTrailingSemicolon: true, } } diff --git a/pkg/collector/corechecks/oracle-dbm/oracle_test.go b/pkg/collector/corechecks/oracle-dbm/oracle_test.go index b83f978e315ec..ec5a8e9176e7b 100644 --- a/pkg/collector/corechecks/oracle-dbm/oracle_test.go +++ b/pkg/collector/corechecks/oracle-dbm/oracle_test.go @@ -146,10 +146,14 @@ func TestChkRun(t *testing.T) { tempLobsBefore, _ := getTemporaryLobs(chk.db) + /* Requires: + * create table sys.t(n number); + * grant insert on sys.t to c##datadog + */ _, err = chk.db.Exec(`begin for i in 1..1000 loop - execute immediate 'insert into t values (' || i || ')'; + execute immediate 'insert into sys.t values (' || i || ')'; end loop; end ;`) assert.NoError(t, err, "error generating statements with %s driver", driver) @@ -301,9 +305,9 @@ func TestObfuscator(t *testing.T) { _, err := o.ObfuscateSQLString(`SELECT TRUNC(SYSDATE@!) from dual`) assert.NoError(t, err, "can't obfuscate @!") - sql := "begin null ; end" + sql := "begin null ; end;" obfuscatedStatement, err := o.ObfuscateSQLString(sql) - assert.Equal(t, sql, obfuscatedStatement.Query) + assert.Equal(t, obfuscatedStatement.Query, "begin null; end;") sql = "select count (*) from dual" obfuscatedStatement, err = o.ObfuscateSQLString(sql) diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index 7658a8b7ede42..3ac438c379ac4 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -274,6 +274,16 @@ type sqlConfig struct { // KeepPositionalParameter specifies whether to disable obfuscate positional parameter with ?. // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". KeepPositionalParameter bool `json:"keep_positional_parameter"` + + // KeepTrailingSemicolon specifies whether to keep trailing semicolon. + // By default, trailing semicolon is removed during normalization. 
+ // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepTrailingSemicolon bool `json:"keep_trailing_semicolon"` + + // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table]. + // By default, identifier quotation is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepIdentifierQuotation bool `json:"keep_identifier_quotation"` } // ObfuscateSQL obfuscates & normalizes the provided SQL query, writing the error into errResult if the operation @@ -306,6 +316,8 @@ func ObfuscateSQL(rawQuery, opts *C.char, errResult **C.char) *C.char { KeepNull: sqlOpts.KeepNull, KeepBoolean: sqlOpts.KeepBoolean, KeepPositionalParameter: sqlOpts.KeepPositionalParameter, + KeepTrailingSemicolon: sqlOpts.KeepTrailingSemicolon, + KeepIdentifierQuotation: sqlOpts.KeepIdentifierQuotation, }) if err != nil { // memory will be freed by caller diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index bbbe461072a5e..8f040f820d038 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -121,6 +121,8 @@ type Agent struct { finish chan struct{} cancel context.CancelFunc + + k8sManaged *string } func xccdfEnabled() bool { @@ -144,7 +146,7 @@ func DefaultRuleFilter(r *Rule) bool { return false } if len(r.Filters) > 0 { - ruleFilterModel, err := rules.NewRuleFilterModel() + ruleFilterModel, err := rules.NewRuleFilterModel("") if err != nil { log.Errorf("failed to apply rule filters: %v", err) return false @@ -213,6 +215,11 @@ func (a *Agent) Start() error { }), ) + _, k8sResourceData := k8sconfig.LoadConfiguration(ctx, a.opts.HostRoot) + if k8sResourceData != nil && k8sResourceData.ManagedEnvironment != nil { + a.k8sManaged = &k8sResourceData.ManagedEnvironment.Name + } + var wg sync.WaitGroup wg.Add(1) @@ -397,7 +404,7 @@ func (a *Agent) runKubernetesConfigurationsExport(ctx context.Context) { } 
func (a *Agent) runAptConfigurationExport(ctx context.Context) { - ruleFilterModel, err := rules.NewRuleFilterModel() + ruleFilterModel, err := rules.NewRuleFilterModel("") if err != nil { log.Errorf("failed to run apt configuration export: %v", err) return @@ -536,6 +543,7 @@ func (a *Agent) reportCheckEvents(eventsTTL time.Duration, events ...*CheckEvent event.Container.ImageTag = ctnr.Image.Tag } } + event.K8SManaged = a.k8sManaged a.opts.Reporter.ReportEvent(event) } } diff --git a/pkg/compliance/aptconfig/aptconfig.go b/pkg/compliance/aptconfig/aptconfig.go index a487b504accbe..79b6cdeb21041 100644 --- a/pkg/compliance/aptconfig/aptconfig.go +++ b/pkg/compliance/aptconfig/aptconfig.go @@ -267,6 +267,7 @@ func readFileLimit(path string) (string, error) { if err != nil { return "", err } + defer f.Close() data, err := io.ReadAll(io.LimitReader(f, maxSize)) if err != nil { return "", err diff --git a/pkg/compliance/data.go b/pkg/compliance/data.go index 08286c097d23d..847075ae49e79 100644 --- a/pkg/compliance/data.go +++ b/pkg/compliance/data.go @@ -81,6 +81,7 @@ type CheckEvent struct { ResourceType string `json:"resource_type,omitempty"` ResourceID string `json:"resource_id,omitempty"` Container *CheckContainerMeta `json:"container,omitempty"` + K8SManaged *string `json:"k8s_managed,omitempty"` Tags []string `json:"tags"` Data map[string]interface{} `json:"data"` @@ -158,8 +159,6 @@ func NewCheckEvent( } // NewCheckSkipped returns a CheckEvent with skipped status. 
-// -//nolint:revive // TODO(CSPM) Fix revive linter func NewCheckSkipped( evaluator Evaluator, skipReason error, @@ -172,6 +171,8 @@ func NewCheckSkipped( AgentVersion: version.AgentVersion, RuleID: rule.ID, FrameworkID: benchmark.FrameworkID, + ResourceID: resourceID, + ResourceType: resourceType, Evaluator: evaluator, Result: CheckSkipped, Data: map[string]interface{}{"error": skipReason.Error()}, diff --git a/pkg/compliance/inputs_docker_nodocker.go b/pkg/compliance/inputs_docker_nodocker.go index 42c8422385b90..e71fb99d01c83 100644 --- a/pkg/compliance/inputs_docker_nodocker.go +++ b/pkg/compliance/inputs_docker_nodocker.go @@ -13,7 +13,6 @@ import ( docker "github.com/docker/docker/client" ) -//nolint:revive // TODO(CSPM) Fix revive linter -func newDockerClient(ctx context.Context) (docker.CommonAPIClient, error) { +func newDockerClient(_ context.Context) (docker.CommonAPIClient, error) { return nil, ErrIncompatibleEnvironment } diff --git a/pkg/compliance/k8sconfig/loader.go b/pkg/compliance/k8sconfig/loader.go index aca7d85963840..a9182c27ee5a0 100644 --- a/pkg/compliance/k8sconfig/loader.go +++ b/pkg/compliance/k8sconfig/loader.go @@ -28,7 +28,7 @@ import ( "gopkg.in/yaml.v3" ) -const version = "202305" +const version = "202312" const ( k8sManifestsDir = "/etc/kubernetes/manifests" @@ -78,13 +78,6 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * node.Manifests.KubeScheduler = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "kube-scheduler.yaml")) node.Manifests.Etcd = l.loadConfigFileMeta(filepath.Join(k8sManifestsDir, "etcd.yaml")) - if eksMeta := l.loadConfigFileMeta("/etc/eks/release"); eksMeta != nil { - node.ManagedEnvironment = &K8sManagedEnvConfig{ - Name: "eks", - Metadata: eksMeta.Content, - } - } - for _, proc := range loadProcesses(ctx) { switch proc.name { case "etcd": @@ -97,6 +90,7 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * node.Components.KubeScheduler = 
l.newK8sKubeSchedulerConfig(proc.flags) case "kubelet": node.Components.Kubelet = l.newK8sKubeletConfig(proc.flags) + node.ManagedEnvironment = l.detectManagedEnvironment(proc.flags) case "kube-proxy": node.Components.KubeProxy = l.newK8sKubeProxyConfig(proc.flags) } @@ -107,13 +101,54 @@ func (l *loader) load(ctx context.Context, loadProcesses procsLoader) (string, * } resourceType := "kubernetes_worker_node" - if node.Components.KubeApiserver != nil { + if managedEnv := node.ManagedEnvironment; managedEnv != nil { + switch managedEnv.Name { + case "eks": + resourceType = "aws_eks_worker_node" + case "gke": + resourceType = "gcp_gke_worker_node" + case "aks": + resourceType = "azure_aks_worker_node" + } + } else if node.Components.KubeApiserver != nil || + node.Components.Etcd != nil || + node.Components.KubeControllerManager != nil || + node.Components.KubeScheduler != nil { resourceType = "kubernetes_master_node" } return resourceType, &node } +func (l *loader) detectManagedEnvironment(flags map[string]string) *K8sManagedEnvConfig { + nodeLabels, ok := flags["--node-labels"] + if ok { + for _, label := range strings.Split(nodeLabels, ",") { + label = strings.TrimSpace(label) + switch { + case strings.HasPrefix(label, "cloud.google.com/gke"): + return &K8sManagedEnvConfig{ + Name: "gke", + } + case strings.HasPrefix(label, "eks.amazonaws.com/"): + env := &K8sManagedEnvConfig{ + Name: "eks", + } + eksMeta := l.loadConfigFileMeta("/etc/eks/release") + if eksMeta != nil { + env.Metadata = eksMeta.Content + } + return env + case strings.HasPrefix(label, "kubernetes.azure.com/"): + return &K8sManagedEnvConfig{ + Name: "aks", + } + } + } + } + return nil +} + func (l *loader) loadMeta(name string, loadContent bool) (string, os.FileInfo, []byte, bool) { name = filepath.Join(l.hostroot, name) info, err := os.Stat(name) @@ -131,6 +166,7 @@ func (l *loader) loadMeta(name string, loadContent bool) (string, os.FileInfo, [ if err != nil { l.pushError(err) } else { + defer 
f.Close() b, err = io.ReadAll(io.LimitReader(f, maxSize)) if err != nil { l.pushError(err) @@ -194,6 +230,38 @@ func (l *loader) loadConfigFileMeta(name string) *K8sConfigFileMeta { } } +func (l *loader) getConfigFromPath(meta *K8sConfigFileMeta, path string) (map[string]interface{}, string, bool) { + if meta == nil || meta.Content == nil { + return nil, "", false + } + content, ok := meta.Content.(map[string]interface{}) + if !ok { + return nil, "", false + } + fields := strings.Split(path, ".") + if len(fields) == 0 { + return nil, "", false + } + if len(fields) > 1 { + for _, field := range fields[:len(fields)-1] { + content, ok = content[field].(map[string]interface{}) + if !ok { + return nil, "", false + } + } + } + return content, fields[len(fields)-1], true +} + +func (l *loader) configFileMetaHasField(meta *K8sConfigFileMeta, path string) bool { + content, lastField, ok := l.getConfigFromPath(meta, path) + if ok { + _, hasField := content[lastField] + return hasField + } + return false +} + func (l *loader) loadKubeletConfigFileMeta(name string) *K8sConfigFileMeta { meta := l.loadConfigFileMeta(name) if meta == nil { @@ -493,40 +561,53 @@ func (l *loader) pushError(err error) { } } -func (l *loader) parseBool(v string) bool { +func (l *loader) parseBool(v string) *bool { if v == "" { - return true + return nil } b, err := strconv.ParseBool(v) if err != nil { l.pushError(err) + return nil } - return b + return &b } //nolint:unused,deadcode -func (l *loader) parseFloat(v string) float64 { +func (l *loader) parseFloat(v string) *float64 { + if v == "" { + return nil + } f, err := strconv.ParseFloat(v, 64) if err != nil { l.pushError(err) + return nil } - return f + return &f } -func (l *loader) parseInt(v string) int { +func (l *loader) parseInt(v string) *int { + if v == "" { + return nil + } i, err := strconv.Atoi(v) if err != nil { l.pushError(err) + return nil } - return i + return &i } -func (l *loader) parseDuration(v string) time.Duration { +func (l 
*loader) parseDuration(v string) *time.Duration { + if v == "" { + return nil + } d, err := time.ParseDuration(v) if err != nil { l.pushError(err) + return nil } - return d + return &d } func buildProc(name string, cmdline []string) proc { diff --git a/pkg/compliance/k8sconfig/loader_test.go b/pkg/compliance/k8sconfig/loader_test.go index 1481a05ce5972..fcbb55a32ccfe 100644 --- a/pkg/compliance/k8sconfig/loader_test.go +++ b/pkg/compliance/k8sconfig/loader_test.go @@ -32,7 +32,8 @@ kubelet \ --cloud-provider=aws \ --container-runtime=remote \ --node-labels=eks.amazonaws.com/sourceLaunchTemplateVersion=1,alpha.eksctl.io/cluster-name=Sandbox,alpha.eksctl.io/nodegroup-name=standard,eks.amazonaws.com/nodegroup-image=ami-09f37ddb4a6ecc85e,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=standard,eks.amazonaws.com/sourceLaunchTemplateId=lt-0df2e04572534b928 \ - --max-pods=17 + --max-pods=17 \ + --rotate-server-certificates=true ` // TODO(jinroh): use testdata files @@ -99,7 +100,6 @@ ARCH="aarch64"`, }, "clusterDomain": "cluster.local", "hairpinMode": "hairpin-veth", - "readOnlyPort": 0, "cgroupDriver": "systemd", "cgroupRoot": "/", "featureGates": { @@ -135,7 +135,8 @@ ARCH="aarch64"`, "memory": "442Mi" }, "systemReservedCgroup": "/system", - "kubeReservedCgroup": "/runtime" + "kubeReservedCgroup": "/runtime", + "maxPods": 18 }`, }, { @@ -337,6 +338,20 @@ func TestKubEksConfigLoader(t *testing.T) { assert.NotNil(t, conf.Components.Kubelet.Kubeconfig) assert.NotNil(t, conf.Components.Kubelet.Config.Content) + kubeletConfig := conf.Components.Kubelet.Config.Content.(map[string]interface{}) + + { + assert.Nil(t, conf.Components.Kubelet.AnonymousAuth) + assert.Equal(t, false, kubeletConfig["authentication"].(map[string]interface{})["anonymous"].(map[string]interface{})["enabled"]) + } + + { + v := 10255 + assert.NotNil(t, conf.Components.Kubelet.ReadOnlyPort) + assert.Equal(t, &v, conf.Components.Kubelet.ReadOnlyPort) + assert.Nil(t, 
kubeletConfig["readOnlyPort"]) + } + { content, ok := conf.Components.Kubelet.Config.Content.(map[string]interface{}) assert.True(t, ok) @@ -347,6 +362,17 @@ func TestKubEksConfigLoader(t *testing.T) { clientCAFile, ok := x509["clientCAFile"].(*K8sCertFileMeta) assert.True(t, ok) assert.NotNil(t, clientCAFile) + assert.Nil(t, conf.Components.Kubelet.ClientCaFile) + + assert.Equal(t, true, content["featureGates"].(map[string]interface{})["RotateKubeletServerCertificate"]) + + assert.Nil(t, conf.Components.Kubelet.AuthorizationMode) + assert.Equal(t, "Webhook", content["authorization"].(map[string]interface{})["mode"]) + + sevenTeen := 17 + eigthTeen := 18 + assert.Equal(t, &sevenTeen, conf.Components.Kubelet.MaxPods) + assert.Equal(t, float64(eigthTeen), content["maxPods"]) } } diff --git a/pkg/compliance/k8sconfig/types_generated.go b/pkg/compliance/k8sconfig/types_generated.go index c38881462c8cd..66edb1939ecd8 100644 --- a/pkg/compliance/k8sconfig/types_generated.go +++ b/pkg/compliance/k8sconfig/types_generated.go @@ -4,7 +4,7 @@ // Copyright 2016-present Datadog, Inc. // !!! -// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator.go +// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator/main.go // !!! 
// //revive:disable @@ -16,47 +16,48 @@ import ( ) type K8sKubeApiserverConfig struct { - AdmissionControlConfigFile *K8sAdmissionConfigFileMeta `json:"admission-control-config-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AllowPrivileged bool `json:"allow-privileged"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AnonymousAuth bool `json:"anonymous-auth"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuditLogMaxage int `json:"audit-log-maxage"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuditLogMaxbackup int `json:"audit-log-maxbackup"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuditLogMaxsize int `json:"audit-log-maxsize"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuditLogPath string `json:"audit-log-path"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuditPolicyFile *K8sConfigFileMeta `json:"audit-policy-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuthorizationMode []string `json:"authorization-mode"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - BindAddress string `json:"bind-address"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - DisableAdmissionPlugins []string `json:"disable-admission-plugins"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EnableAdmissionPlugins []string `json:"enable-admission-plugins"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EnableBootstrapTokenAuth bool `json:"enable-bootstrap-token-auth"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EncryptionProviderConfig *K8sEncryptionProviderConfigFileMeta `json:"encryption-provider-config"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EtcdCafile *K8sCertFileMeta `json:"etcd-cafile"` // versions: v1.27.3, v1.26.6, v1.25.11, 
v1.24.15, v1.23.17 - EtcdCertfile *K8sCertFileMeta `json:"etcd-certfile"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EtcdKeyfile *K8sKeyFileMeta `json:"etcd-keyfile"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - FeatureGates string `json:"feature-gates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - KubeletCertificateAuthority *K8sCertFileMeta `json:"kubelet-certificate-authority"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - KubeletClientCertificate *K8sCertFileMeta `json:"kubelet-client-certificate"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - KubeletClientKey *K8sKeyFileMeta `json:"kubelet-client-key"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Profiling bool `json:"profiling"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ProxyClientCertFile *K8sCertFileMeta `json:"proxy-client-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ProxyClientKeyFile *K8sKeyFileMeta `json:"proxy-client-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestTimeout time.Duration `json:"request-timeout"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - SecurePort int `json:"secure-port"` // versions: v1.27.3, v1.26.6, 
v1.25.11, v1.24.15, v1.23.17 - ServiceAccountIssuer string `json:"service-account-issuer"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceAccountKeyFile *K8sKeyFileMeta `json:"service-account-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceAccountLookup bool `json:"service-account-lookup"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceAccountSigningKeyFile *K8sKeyFileMeta `json:"service-account-signing-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceClusterIpRange string `json:"service-cluster-ip-range"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TokenAuthFile *K8sTokenFileMeta `json:"token-auth-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 + AdmissionControlConfigFile *K8sAdmissionConfigFileMeta `json:"admission-control-config-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AllowPrivileged *bool `json:"allow-privileged,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AnonymousAuth *bool `json:"anonymous-auth,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuditLogMaxage *int `json:"audit-log-maxage,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuditLogMaxbackup *int `json:"audit-log-maxbackup,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuditLogMaxsize *int `json:"audit-log-maxsize,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuditLogPath *string `json:"audit-log-path,omitempty"` // versions: v1.28.4, v1.27.3, 
v1.26.6, v1.25.11, v1.24.15 + AuditPolicyFile *K8sConfigFileMeta `json:"audit-policy-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuthorizationMode []string `json:"authorization-mode,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + BindAddress *string `json:"bind-address,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + DisableAdmissionPlugins []string `json:"disable-admission-plugins,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EnableAdmissionPlugins []string `json:"enable-admission-plugins,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EnableBootstrapTokenAuth *bool `json:"enable-bootstrap-token-auth,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EncryptionProviderConfig *K8sEncryptionProviderConfigFileMeta `json:"encryption-provider-config,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EtcdCafile *K8sCertFileMeta `json:"etcd-cafile,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EtcdCertfile *K8sCertFileMeta `json:"etcd-certfile,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EtcdKeyfile *K8sKeyFileMeta `json:"etcd-keyfile,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + FeatureGates *string `json:"feature-gates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + KubeletCertificateAuthority *K8sCertFileMeta `json:"kubelet-certificate-authority,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + KubeletClientCertificate *K8sCertFileMeta `json:"kubelet-client-certificate,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + KubeletClientKey *K8sKeyFileMeta `json:"kubelet-client-key,omitempty"` // versions: v1.28.4, v1.27.3, 
v1.26.6, v1.25.11, v1.24.15 + Profiling *bool `json:"profiling,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ProxyClientCertFile *K8sCertFileMeta `json:"proxy-client-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ProxyClientKeyFile *K8sKeyFileMeta `json:"proxy-client-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestTimeout *time.Duration `json:"request-timeout,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + SecurePort *int `json:"secure-port,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceAccountIssuer *string `json:"service-account-issuer,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceAccountKeyFile *K8sKeyFileMeta `json:"service-account-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceAccountLookup *bool `json:"service-account-lookup,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceAccountSigningKeyFile *K8sKeyFileMeta `json:"service-account-signing-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceClusterIpRange *string 
`json:"service-cluster-ip-range,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCipherSuites []string `json:"tls-cipher-suites,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsMinVersion *string `json:"tls-min-version,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TokenAuthFile *K8sTokenFileMeta `json:"token-auth-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -72,34 +73,33 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--allow-privileged"]; ok { delete(flags, "--allow-privileged") res.AllowPrivileged = l.parseBool(v) + + } else { + res.AllowPrivileged = l.parseBool("false") } if v, ok := flags["--anonymous-auth"]; ok { delete(flags, "--anonymous-auth") res.AnonymousAuth = l.parseBool(v) + } else { res.AnonymousAuth = l.parseBool("true") } if v, ok := flags["--audit-log-maxage"]; ok { delete(flags, "--audit-log-maxage") res.AuditLogMaxage = l.parseInt(v) - } else { - res.AuditLogMaxage = l.parseInt("0") } if v, ok := flags["--audit-log-maxbackup"]; ok { delete(flags, "--audit-log-maxbackup") res.AuditLogMaxbackup = l.parseInt(v) - } else { - res.AuditLogMaxbackup = l.parseInt("0") } if v, ok := flags["--audit-log-maxsize"]; ok { delete(flags, "--audit-log-maxsize") res.AuditLogMaxsize = l.parseInt(v) - } else { - res.AuditLogMaxsize = l.parseInt("0") } if v, ok := flags["--audit-log-path"]; ok { delete(flags, "--audit-log-path") - res.AuditLogPath = v + v := v + res.AuditLogPath = &v } if v, ok := flags["--audit-policy-file"]; ok { delete(flags, "--audit-policy-file") @@ -108,14 
+108,18 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--authorization-mode"]; ok { delete(flags, "--authorization-mode") res.AuthorizationMode = strings.Split(v, ",") + } else { res.AuthorizationMode = strings.Split("AlwaysAllow", ",") } if v, ok := flags["--bind-address"]; ok { delete(flags, "--bind-address") - res.BindAddress = v + v := v + res.BindAddress = &v + } else { - res.BindAddress = "0.0.0.0" + v := "0.0.0.0" + res.BindAddress = &v } if v, ok := flags["--client-ca-file"]; ok { delete(flags, "--client-ca-file") @@ -151,7 +155,8 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis } if v, ok := flags["--feature-gates"]; ok { delete(flags, "--feature-gates") - res.FeatureGates = v + v := v + res.FeatureGates = &v } if v, ok := flags["--kubelet-certificate-authority"]; ok { delete(flags, "--kubelet-certificate-authority") @@ -168,6 +173,7 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--profiling"]; ok { delete(flags, "--profiling") res.Profiling = l.parseBool(v) + } else { res.Profiling = l.parseBool("true") } @@ -182,6 +188,7 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--request-timeout"]; ok { delete(flags, "--request-timeout") res.RequestTimeout = l.parseDuration(v) + } else { res.RequestTimeout = l.parseDuration("1m0s") } @@ -208,12 +215,14 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--secure-port"]; ok { delete(flags, "--secure-port") res.SecurePort = l.parseInt(v) + } else { res.SecurePort = l.parseInt("6443") } if v, ok := flags["--service-account-issuer"]; ok { delete(flags, "--service-account-issuer") - res.ServiceAccountIssuer = v + v := v + res.ServiceAccountIssuer = &v } if v, ok := flags["--service-account-key-file"]; ok { delete(flags, "--service-account-key-file") @@ -222,6 +231,7 @@ 
func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis if v, ok := flags["--service-account-lookup"]; ok { delete(flags, "--service-account-lookup") res.ServiceAccountLookup = l.parseBool(v) + } else { res.ServiceAccountLookup = l.parseBool("true") } @@ -231,7 +241,8 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis } if v, ok := flags["--service-cluster-ip-range"]; ok { delete(flags, "--service-cluster-ip-range") - res.ServiceClusterIpRange = v + v := v + res.ServiceClusterIpRange = &v } if v, ok := flags["--tls-cert-file"]; ok { delete(flags, "--tls-cert-file") @@ -241,6 +252,11 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis delete(flags, "--tls-cipher-suites") res.TlsCipherSuites = strings.Split(v, ",") } + if v, ok := flags["--tls-min-version"]; ok { + delete(flags, "--tls-min-version") + v := v + res.TlsMinVersion = &v + } if v, ok := flags["--tls-private-key-file"]; ok { delete(flags, "--tls-private-key-file") res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) @@ -256,23 +272,24 @@ func (l *loader) newK8sKubeApiserverConfig(flags map[string]string) *K8sKubeApis } type K8sKubeSchedulerConfig struct { - AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuthorizationKubeconfig string `json:"authorization-kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - BindAddress string `json:"bind-address"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Config *K8sConfigFileMeta `json:"config"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - FeatureGates string `json:"feature-gates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.27.3, v1.26.6, 
v1.25.11, v1.24.15, v1.23.17 - Profiling bool `json:"profiling"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - SecurePort int `json:"secure-port"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 + Config *K8sConfigFileMeta `json:"config,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuthorizationKubeconfig *string `json:"authorization-kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + BindAddress *string `json:"bind-address,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + FeatureGates *string `json:"feature-gates,omitempty"` // versions: v1.28.4, v1.27.3, 
v1.26.6, v1.25.11, v1.24.15 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Profiling *bool `json:"profiling,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + SecurePort *int `json:"secure-port,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCipherSuites []string `json:"tls-cipher-suites,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsMinVersion *string `json:"tls-min-version,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -281,31 +298,36 @@ func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSche return nil } var res K8sKubeSchedulerConfig + if v, ok := flags["--config"]; ok { + delete(flags, "--config") + res.Config = l.loadConfigFileMeta(v) + } if v, ok := flags["--authentication-kubeconfig"]; ok { delete(flags, 
"--authentication-kubeconfig") res.AuthenticationKubeconfig = l.loadKubeconfigMeta(v) } if v, ok := flags["--authorization-kubeconfig"]; ok { delete(flags, "--authorization-kubeconfig") - res.AuthorizationKubeconfig = v + v := v + res.AuthorizationKubeconfig = &v } if v, ok := flags["--bind-address"]; ok { delete(flags, "--bind-address") - res.BindAddress = v + v := v + res.BindAddress = &v + } else { - res.BindAddress = "0.0.0.0" + v := "0.0.0.0" + res.BindAddress = &v } if v, ok := flags["--client-ca-file"]; ok { delete(flags, "--client-ca-file") res.ClientCaFile = l.loadCertFileMeta(v) } - if v, ok := flags["--config"]; ok { - delete(flags, "--config") - res.Config = l.loadConfigFileMeta(v) - } if v, ok := flags["--feature-gates"]; ok { delete(flags, "--feature-gates") - res.FeatureGates = v + v := v + res.FeatureGates = &v } if v, ok := flags["--kubeconfig"]; ok { delete(flags, "--kubeconfig") @@ -314,7 +336,8 @@ func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSche if v, ok := flags["--profiling"]; ok { delete(flags, "--profiling") res.Profiling = l.parseBool(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "enableProfiling") { res.Profiling = l.parseBool("true") } if v, ok := flags["--requestheader-allowed-names"]; ok { @@ -328,24 +351,28 @@ func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSche if v, ok := flags["--requestheader-extra-headers-prefix"]; ok { delete(flags, "--requestheader-extra-headers-prefix") res.RequestheaderExtraHeadersPrefix = strings.Split(v, ",") + } else { res.RequestheaderExtraHeadersPrefix = strings.Split("x-remote-extra-", ",") } if v, ok := flags["--requestheader-group-headers"]; ok { delete(flags, "--requestheader-group-headers") res.RequestheaderGroupHeaders = strings.Split(v, ",") + } else { res.RequestheaderGroupHeaders = strings.Split("x-remote-group", ",") } if v, ok := flags["--requestheader-username-headers"]; ok { delete(flags, 
"--requestheader-username-headers") res.RequestheaderUsernameHeaders = strings.Split(v, ",") + } else { res.RequestheaderUsernameHeaders = strings.Split("x-remote-user", ",") } if v, ok := flags["--secure-port"]; ok { delete(flags, "--secure-port") res.SecurePort = l.parseInt(v) + } else { res.SecurePort = l.parseInt("10259") } @@ -357,6 +384,11 @@ func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSche delete(flags, "--tls-cipher-suites") res.TlsCipherSuites = strings.Split(v, ",") } + if v, ok := flags["--tls-min-version"]; ok { + delete(flags, "--tls-min-version") + v := v + res.TlsMinVersion = &v + } if v, ok := flags["--tls-private-key-file"]; ok { delete(flags, "--tls-private-key-file") res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) @@ -368,29 +400,30 @@ func (l *loader) newK8sKubeSchedulerConfig(flags map[string]string) *K8sKubeSche } type K8sKubeControllerManagerConfig struct { - AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuthorizationKubeconfig string `json:"authorization-kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - BindAddress string `json:"bind-address"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClusterSigningCertFile *K8sCertFileMeta `json:"cluster-signing-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClusterSigningKeyFile *K8sKeyFileMeta `json:"cluster-signing-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - FeatureGates string `json:"feature-gates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Profiling bool `json:"profiling"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - 
RequestheaderAllowedNames []string `json:"requestheader-allowed-names"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderGroupHeaders []string `json:"requestheader-group-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RequestheaderUsernameHeaders []string `json:"requestheader-username-headers"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RootCaFile *K8sCertFileMeta `json:"root-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - SecurePort int `json:"secure-port"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceAccountPrivateKeyFile *K8sKeyFileMeta `json:"service-account-private-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ServiceClusterIpRange string `json:"service-cluster-ip-range"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TerminatedPodGcThreshold int `json:"terminated-pod-gc-threshold"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - UseServiceAccountCredentials bool `json:"use-service-account-credentials"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 + AuthenticationKubeconfig *K8sKubeconfigMeta `json:"authentication-kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuthorizationKubeconfig *string 
`json:"authorization-kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + BindAddress *string `json:"bind-address,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClientCaFile *K8sCertFileMeta `json:"client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClusterSigningCertFile *K8sCertFileMeta `json:"cluster-signing-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClusterSigningKeyFile *K8sKeyFileMeta `json:"cluster-signing-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + FeatureGates *string `json:"feature-gates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Profiling *bool `json:"profiling,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderAllowedNames []string `json:"requestheader-allowed-names,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderClientCaFile *K8sCertFileMeta `json:"requestheader-client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderExtraHeadersPrefix []string `json:"requestheader-extra-headers-prefix,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderGroupHeaders []string `json:"requestheader-group-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RequestheaderUsernameHeaders []string `json:"requestheader-username-headers,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RootCaFile *K8sCertFileMeta `json:"root-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + SecurePort *int `json:"secure-port,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceAccountPrivateKeyFile *K8sKeyFileMeta 
`json:"service-account-private-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ServiceClusterIpRange *string `json:"service-cluster-ip-range,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TerminatedPodGcThreshold *int `json:"terminated-pod-gc-threshold,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCertFile *K8sCertFileMeta `json:"tls-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCipherSuites []string `json:"tls-cipher-suites,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsMinVersion *string `json:"tls-min-version,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + UseServiceAccountCredentials *bool `json:"use-service-account-credentials,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -405,13 +438,17 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s } if v, ok := flags["--authorization-kubeconfig"]; ok { delete(flags, "--authorization-kubeconfig") - res.AuthorizationKubeconfig = v + v := v + res.AuthorizationKubeconfig = &v } if v, ok := flags["--bind-address"]; ok { delete(flags, "--bind-address") - res.BindAddress = v + v := v + res.BindAddress = &v + } else { - res.BindAddress = "0.0.0.0" + v := "0.0.0.0" + res.BindAddress = &v } if v, ok := flags["--client-ca-file"]; ok { delete(flags, "--client-ca-file") @@ -427,7 +464,8 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s } if v, ok := flags["--feature-gates"]; ok { delete(flags, "--feature-gates") - res.FeatureGates = v + v := v + res.FeatureGates = &v } if v, ok := flags["--kubeconfig"]; ok { delete(flags, "--kubeconfig") @@ -436,6 +474,7 @@ func 
(l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s if v, ok := flags["--profiling"]; ok { delete(flags, "--profiling") res.Profiling = l.parseBool(v) + } else { res.Profiling = l.parseBool("true") } @@ -450,18 +489,21 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s if v, ok := flags["--requestheader-extra-headers-prefix"]; ok { delete(flags, "--requestheader-extra-headers-prefix") res.RequestheaderExtraHeadersPrefix = strings.Split(v, ",") + } else { res.RequestheaderExtraHeadersPrefix = strings.Split("x-remote-extra-", ",") } if v, ok := flags["--requestheader-group-headers"]; ok { delete(flags, "--requestheader-group-headers") res.RequestheaderGroupHeaders = strings.Split(v, ",") + } else { res.RequestheaderGroupHeaders = strings.Split("x-remote-group", ",") } if v, ok := flags["--requestheader-username-headers"]; ok { delete(flags, "--requestheader-username-headers") res.RequestheaderUsernameHeaders = strings.Split(v, ",") + } else { res.RequestheaderUsernameHeaders = strings.Split("x-remote-user", ",") } @@ -472,6 +514,7 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s if v, ok := flags["--secure-port"]; ok { delete(flags, "--secure-port") res.SecurePort = l.parseInt(v) + } else { res.SecurePort = l.parseInt("10257") } @@ -481,11 +524,13 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s } if v, ok := flags["--service-cluster-ip-range"]; ok { delete(flags, "--service-cluster-ip-range") - res.ServiceClusterIpRange = v + v := v + res.ServiceClusterIpRange = &v } if v, ok := flags["--terminated-pod-gc-threshold"]; ok { delete(flags, "--terminated-pod-gc-threshold") res.TerminatedPodGcThreshold = l.parseInt(v) + } else { res.TerminatedPodGcThreshold = l.parseInt("12500") } @@ -497,6 +542,11 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s delete(flags, "--tls-cipher-suites") res.TlsCipherSuites = 
strings.Split(v, ",") } + if v, ok := flags["--tls-min-version"]; ok { + delete(flags, "--tls-min-version") + v := v + res.TlsMinVersion = &v + } if v, ok := flags["--tls-private-key-file"]; ok { delete(flags, "--tls-private-key-file") res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) @@ -512,12 +562,12 @@ func (l *loader) newK8sKubeControllerManagerConfig(flags map[string]string) *K8s } type K8sKubeProxyConfig struct { - BindAddress string `json:"bind-address"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Config *K8sConfigFileMeta `json:"config"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - FeatureGates string `json:"feature-gates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - HostnameOverride string `json:"hostname-override"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Profiling bool `json:"profiling"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 + Config *K8sConfigFileMeta `json:"config,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + BindAddress *string `json:"bind-address,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + FeatureGates *string `json:"feature-gates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + HostnameOverride *string `json:"hostname-override,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Profiling *bool `json:"profiling,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -526,23 +576,28 @@ func (l *loader) newK8sKubeProxyConfig(flags map[string]string) *K8sKubeProxyCon return nil } var res K8sKubeProxyConfig - if v, ok := flags["--bind-address"]; ok { - 
delete(flags, "--bind-address") - res.BindAddress = v - } else { - res.BindAddress = "0.0.0.0" - } if v, ok := flags["--config"]; ok { delete(flags, "--config") res.Config = l.loadConfigFileMeta(v) } + if v, ok := flags["--bind-address"]; ok { + delete(flags, "--bind-address") + v := v + res.BindAddress = &v + + } else { + v := "0.0.0.0" + res.BindAddress = &v + } if v, ok := flags["--feature-gates"]; ok { delete(flags, "--feature-gates") - res.FeatureGates = v + v := v + res.FeatureGates = &v } if v, ok := flags["--hostname-override"]; ok { delete(flags, "--hostname-override") - res.HostnameOverride = v + v := v + res.HostnameOverride = &v } if v, ok := flags["--kubeconfig"]; ok { delete(flags, "--kubeconfig") @@ -559,29 +614,30 @@ func (l *loader) newK8sKubeProxyConfig(flags map[string]string) *K8sKubeProxyCon } type K8sKubeletConfig struct { - Address string `json:"address"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AnonymousAuth bool `json:"anonymous-auth"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - AuthorizationMode string `json:"authorization-mode"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ClientCaFile *K8sCertFileMeta `json:"client-ca-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Config *K8sConfigFileMeta `json:"config"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EventBurst int `json:"event-burst"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - EventQps int `json:"event-qps"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - FeatureGates string `json:"feature-gates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - HostnameOverride string `json:"hostname-override"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ImageCredentialProviderBinDir *K8sDirMeta `json:"image-credential-provider-bin-dir"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ImageCredentialProviderConfig *K8sConfigFileMeta 
`json:"image-credential-provider-config"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - MakeIptablesUtilChains bool `json:"make-iptables-util-chains"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - MaxPods int `json:"max-pods"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - PodMaxPids int `json:"pod-max-pids"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ProtectKernelDefaults bool `json:"protect-kernel-defaults"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - ReadOnlyPort int `json:"read-only-port"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RotateCertificates bool `json:"rotate-certificates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - RotateServerCertificates bool `json:"rotate-server-certificates"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - StreamingConnectionIdleTimeout time.Duration `json:"streaming-connection-idle-timeout"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCertFile *K8sCertFileMeta `json:"tls-cert-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsCipherSuites []string `json:"tls-cipher-suites"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 - TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file"` // versions: v1.27.3, v1.26.6, v1.25.11, v1.24.15, v1.23.17 + Config *K8sConfigFileMeta `json:"config,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Address *string `json:"address,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AnonymousAuth *bool `json:"anonymous-auth,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + AuthorizationMode *string `json:"authorization-mode,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ClientCaFile *K8sCertFileMeta 
`json:"client-ca-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EventBurst *int `json:"event-burst,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + EventQps *int `json:"event-qps,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + FeatureGates *string `json:"feature-gates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + HostnameOverride *string `json:"hostname-override,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ImageCredentialProviderBinDir *K8sDirMeta `json:"image-credential-provider-bin-dir,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ImageCredentialProviderConfig *K8sConfigFileMeta `json:"image-credential-provider-config,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + Kubeconfig *K8sKubeconfigMeta `json:"kubeconfig,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + MakeIptablesUtilChains *bool `json:"make-iptables-util-chains,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + MaxPods *int `json:"max-pods,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + PodMaxPids *int `json:"pod-max-pids,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ProtectKernelDefaults *bool `json:"protect-kernel-defaults,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + ReadOnlyPort *int `json:"read-only-port,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RotateCertificates *bool `json:"rotate-certificates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + RotateServerCertificates *bool `json:"rotate-server-certificates,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + StreamingConnectionIdleTimeout *time.Duration `json:"streaming-connection-idle-timeout,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + 
TlsCertFile *K8sCertFileMeta `json:"tls-cert-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsCipherSuites []string `json:"tls-cipher-suites,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsMinVersion *string `json:"tls-min-version,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 + TlsPrivateKeyFile *K8sKeyFileMeta `json:"tls-private-key-file,omitempty"` // versions: v1.28.4, v1.27.3, v1.26.6, v1.25.11, v1.24.15 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -590,51 +646,62 @@ func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig return nil } var res K8sKubeletConfig + if v, ok := flags["--config"]; ok { + delete(flags, "--config") + res.Config = l.loadKubeletConfigFileMeta(v) + } if v, ok := flags["--address"]; ok { delete(flags, "--address") - res.Address = v - } else { - res.Address = "0.0.0.0" + v := v + res.Address = &v + + } else if !l.configFileMetaHasField(res.Config, "address") { + v := "0.0.0.0" + res.Address = &v } if v, ok := flags["--anonymous-auth"]; ok { delete(flags, "--anonymous-auth") res.AnonymousAuth = l.parseBool(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "authentication.anonymous.enabled") { res.AnonymousAuth = l.parseBool("true") } if v, ok := flags["--authorization-mode"]; ok { delete(flags, "--authorization-mode") - res.AuthorizationMode = v - } else { - res.AuthorizationMode = "AlwaysAllow" + v := v + res.AuthorizationMode = &v + + } else if !l.configFileMetaHasField(res.Config, "authorization.mode") { + v := "AlwaysAllow" + res.AuthorizationMode = &v } if v, ok := flags["--client-ca-file"]; ok { delete(flags, "--client-ca-file") res.ClientCaFile = l.loadCertFileMeta(v) } - if v, ok := flags["--config"]; ok { - delete(flags, "--config") - res.Config = l.loadKubeletConfigFileMeta(v) - } if v, ok := flags["--event-burst"]; ok { delete(flags, "--event-burst") res.EventBurst = l.parseInt(v) - } 
else { + + } else if !l.configFileMetaHasField(res.Config, "eventBurst") { res.EventBurst = l.parseInt("100") } if v, ok := flags["--event-qps"]; ok { delete(flags, "--event-qps") res.EventQps = l.parseInt(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "eventRecordQPS") { res.EventQps = l.parseInt("50") } if v, ok := flags["--feature-gates"]; ok { delete(flags, "--feature-gates") - res.FeatureGates = v + v := v + res.FeatureGates = &v } if v, ok := flags["--hostname-override"]; ok { delete(flags, "--hostname-override") - res.HostnameOverride = v + v := v + res.HostnameOverride = &v } if v, ok := flags["--image-credential-provider-bin-dir"]; ok { delete(flags, "--image-credential-provider-bin-dir") @@ -651,19 +718,22 @@ func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig if v, ok := flags["--make-iptables-util-chains"]; ok { delete(flags, "--make-iptables-util-chains") res.MakeIptablesUtilChains = l.parseBool(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "makeIPTablesUtilChains") { res.MakeIptablesUtilChains = l.parseBool("true") } if v, ok := flags["--max-pods"]; ok { delete(flags, "--max-pods") res.MaxPods = l.parseInt(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "maxPods") { res.MaxPods = l.parseInt("110") } if v, ok := flags["--pod-max-pids"]; ok { delete(flags, "--pod-max-pids") res.PodMaxPids = l.parseInt(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "podPidsLimit") { res.PodMaxPids = l.parseInt("-1") } if v, ok := flags["--protect-kernel-defaults"]; ok { @@ -673,7 +743,8 @@ func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig if v, ok := flags["--read-only-port"]; ok { delete(flags, "--read-only-port") res.ReadOnlyPort = l.parseInt(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "readOnlyPort") { res.ReadOnlyPort = l.parseInt("10255") } if v, ok := flags["--rotate-certificates"]; ok { @@ -687,7 +758,8 @@ func 
(l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig if v, ok := flags["--streaming-connection-idle-timeout"]; ok { delete(flags, "--streaming-connection-idle-timeout") res.StreamingConnectionIdleTimeout = l.parseDuration(v) - } else { + + } else if !l.configFileMetaHasField(res.Config, "streamingConnectionIdleTimeout") { res.StreamingConnectionIdleTimeout = l.parseDuration("4h0m0s") } if v, ok := flags["--tls-cert-file"]; ok { @@ -698,6 +770,11 @@ func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig delete(flags, "--tls-cipher-suites") res.TlsCipherSuites = strings.Split(v, ",") } + if v, ok := flags["--tls-min-version"]; ok { + delete(flags, "--tls-min-version") + v := v + res.TlsMinVersion = &v + } if v, ok := flags["--tls-private-key-file"]; ok { delete(flags, "--tls-private-key-file") res.TlsPrivateKeyFile = l.loadKeyFileMeta(v) @@ -709,17 +786,18 @@ func (l *loader) newK8sKubeletConfig(flags map[string]string) *K8sKubeletConfig } type K8sEtcdConfig struct { - AutoTls bool `json:"auto-tls"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - CertFile *K8sCertFileMeta `json:"cert-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - ClientCertAuth bool `json:"client-cert-auth"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - DataDir *K8sDirMeta `json:"data-dir"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - KeyFile *K8sKeyFileMeta `json:"key-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - PeerAutoTls bool `json:"peer-auto-tls"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - PeerCertFile *K8sCertFileMeta `json:"peer-cert-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - PeerClientCertAuth bool `json:"peer-client-cert-auth"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - PeerKeyFile *K8sKeyFileMeta `json:"peer-key-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - PeerTrustedCaFile *K8sCertFileMeta `json:"peer-trusted-ca-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 - TrustedCaFile 
*K8sCertFileMeta `json:"trusted-ca-file"` // versions: v3.5.7, v3.4.18, v3.3.17, v3.2.32 + AutoTls *bool `json:"auto-tls,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + CertFile *K8sCertFileMeta `json:"cert-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + ClientCertAuth *bool `json:"client-cert-auth,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + DataDir *K8sDirMeta `json:"data-dir,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + KeyFile *K8sKeyFileMeta `json:"key-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + PeerAutoTls *bool `json:"peer-auto-tls,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + PeerCertFile *K8sCertFileMeta `json:"peer-cert-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + PeerClientCertAuth *bool `json:"peer-client-cert-auth,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + PeerKeyFile *K8sKeyFileMeta `json:"peer-key-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + PeerTrustedCaFile *K8sCertFileMeta `json:"peer-trusted-ca-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 + TlsMinVersion *string `json:"tls-min-version,omitempty"` // versions: v3.5.10, v3.4.28 + TrustedCaFile *K8sCertFileMeta `json:"trusted-ca-file,omitempty"` // versions: v3.5.10, v3.4.28, v3.3.17, v3.2.32 SkippedFlags map[string]string `json:"skippedFlags,omitempty"` } @@ -731,6 +809,9 @@ func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { if v, ok := flags["--auto-tls"]; ok { delete(flags, "--auto-tls") res.AutoTls = l.parseBool(v) + + } else { + res.AutoTls = l.parseBool("false") } if v, ok := flags["--cert-file"]; ok { delete(flags, "--cert-file") @@ -739,6 +820,9 @@ func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { if v, ok := flags["--client-cert-auth"]; ok { delete(flags, "--client-cert-auth") res.ClientCertAuth = l.parseBool(v) + + } else { + res.ClientCertAuth = 
l.parseBool("false") } if v, ok := flags["--data-dir"]; ok { delete(flags, "--data-dir") @@ -751,6 +835,9 @@ func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { if v, ok := flags["--peer-auto-tls"]; ok { delete(flags, "--peer-auto-tls") res.PeerAutoTls = l.parseBool(v) + + } else { + res.PeerAutoTls = l.parseBool("false") } if v, ok := flags["--peer-cert-file"]; ok { delete(flags, "--peer-cert-file") @@ -759,6 +846,9 @@ func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { if v, ok := flags["--peer-client-cert-auth"]; ok { delete(flags, "--peer-client-cert-auth") res.PeerClientCertAuth = l.parseBool(v) + + } else { + res.PeerClientCertAuth = l.parseBool("false") } if v, ok := flags["--peer-key-file"]; ok { delete(flags, "--peer-key-file") @@ -768,6 +858,15 @@ func (l *loader) newK8sEtcdConfig(flags map[string]string) *K8sEtcdConfig { delete(flags, "--peer-trusted-ca-file") res.PeerTrustedCaFile = l.loadCertFileMeta(v) } + if v, ok := flags["--tls-min-version"]; ok { + delete(flags, "--tls-min-version") + v := v + res.TlsMinVersion = &v + + } else { + v := "TLS1.2" + res.TlsMinVersion = &v + } if v, ok := flags["--trusted-ca-file"]; ok { delete(flags, "--trusted-ca-file") res.TrustedCaFile = l.loadCertFileMeta(v) diff --git a/pkg/compliance/resolver.go b/pkg/compliance/resolver.go index edf75c2d20544..865a556e33a00 100644 --- a/pkg/compliance/resolver.go +++ b/pkg/compliance/resolver.go @@ -69,7 +69,7 @@ func DefaultDockerProvider(ctx context.Context) (docker.CommonAPIClient, error) } // DefaultLinuxAuditProvider returns the default Linux Audit client. 
-func DefaultLinuxAuditProvider(ctx context.Context) (LinuxAuditClient, error) { //nolint:revive // TODO fix revive unused-parameter +func DefaultLinuxAuditProvider(_ context.Context) (LinuxAuditClient, error) { return newLinuxAuditClient() } @@ -498,8 +498,7 @@ func (r *defaultResolver) getProcs(ctx context.Context) ([]*process.Process, err return r.procsCache, nil } -//nolint:revive // TODO(CSPM) Fix revive linter -func (r *defaultResolver) resolveGroup(ctx context.Context, spec InputSpecGroup) (interface{}, error) { +func (r *defaultResolver) resolveGroup(_ context.Context, spec InputSpecGroup) (interface{}, error) { f, err := os.Open(r.pathNormalizeToHostRoot("/etc/group")) if err != nil { return nil, err @@ -533,8 +532,7 @@ func (r *defaultResolver) resolveGroup(ctx context.Context, spec InputSpecGroup) return nil, nil } -//nolint:revive // TODO(CSPM) Fix revive linter -func (r *defaultResolver) resolveAudit(ctx context.Context, spec InputSpecAudit) (interface{}, error) { +func (r *defaultResolver) resolveAudit(_ context.Context, spec InputSpecAudit) (interface{}, error) { cl := r.linuxAuditCl if cl == nil { return nil, ErrIncompatibleEnvironment diff --git a/pkg/compliance/tests/base_test.go b/pkg/compliance/tests/base_test.go index 731e46cf2111a..87a54357f05ed 100644 --- a/pkg/compliance/tests/base_test.go +++ b/pkg/compliance/tests/base_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(CSPM) Fix revive linter +// Package tests implements the unit tests for pkg/compliance. 
package tests import ( diff --git a/pkg/compliance/tests/kubernetes_test.go b/pkg/compliance/tests/kubernetes_test.go index 3e47bd851b2ff..715dfb3e570c8 100644 --- a/pkg/compliance/tests/kubernetes_test.go +++ b/pkg/compliance/tests/kubernetes_test.go @@ -20,8 +20,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" ) -//nolint:revive // TODO(CSPM) Fix revive linter -func getMockedKubeClient(t *testing.T, objects ...runtime.Object) dynamic.Interface { +func getMockedKubeClient(objects ...runtime.Object) dynamic.Interface { addKnownTypes := func(scheme *runtime.Scheme) error { scheme.AddKnownTypes(schema.GroupVersion{Group: "mygroup.com", Version: "v1"}, &MyObj{}, @@ -35,7 +34,7 @@ func getMockedKubeClient(t *testing.T, objects ...runtime.Object) dynamic.Interf } func TestKubernetesCluster(t *testing.T) { - kubeClient := getMockedKubeClient(t, + kubeClient := getMockedKubeClient( &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ UID: "my-cluster", diff --git a/pkg/compliance/tools/k8s_types_generator/main.go b/pkg/compliance/tools/k8s_types_generator/main.go index fa1a19c90692e..879164059ad8f 100644 --- a/pkg/compliance/tools/k8s_types_generator/main.go +++ b/pkg/compliance/tools/k8s_types_generator/main.go @@ -42,23 +42,27 @@ var ( // https://kubernetes.io/releases/ k8sVersions = []string{ + "v1.28.4", "v1.27.3", "v1.26.6", "v1.25.11", "v1.24.15", - "v1.23.17", } // https://github.com/kubernetes/kubernetes/blob/c3e7eca7fd38454200819b60e58144d5727f1bbc/cluster/images/etcd/Makefile#L18 // "v3.0.17", "v3.1.20" removed because they do not have ARM64 tarballs etcdVersions = []string{ - "v3.5.7", - "v3.4.18", + "v3.5.10", + "v3.4.28", "v3.3.17", "v3.2.32", } knownFlags = []string{ + // Make sure "--config" parsing is first as it implements the precedence rules + // between configuration and CLI args + "--config", + "--address", "--admission-control-config-file", "--allow-privileged", @@ -78,7 +82,6 @@ var ( "--client-cert-auth", "--cluster-signing-cert-file", 
"--cluster-signing-key-file", - "--config", "--data-dir", "--disable-admission-plugins", "--enable-admission-plugins", @@ -131,11 +134,45 @@ var ( "--terminated-pod-gc-threshold", "--tls-cert-file", "--tls-cipher-suites", + "--tls-min-version", "--tls-private-key-file", "--token-auth-file", "--trusted-ca-file", "--use-service-account-credentials", } + + // reference: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletAuthentication + cliConfAssoc = map[string]string{ + "kubelet.address": "address", + "kubelet.anonymous-auth": "authentication.anonymous.enabled", + "kubelet.authorization-mode": "authorization.mode", + "kubelet.client-ca-file": "authorization.x509.clientCAFile", + "kubelet.event-burst": "eventBurst", + "kubelet.event-qps": "eventRecordQPS", + "kubelet.feature-gates": "featureGates", + "kubelet.make-iptables-util-chains": "makeIPTablesUtilChains", + "kubelet.max-pods": "maxPods", + "kubelet.pod-max-pids": "podPidsLimit", + "kubelet.protect-kernel-defaults": "protectKernelDefaults", + "kubelet.read-only-port": "readOnlyPort", + "kubelet.rotate-certificates": "rotateCertificates", + "kubelet.rotate-server-certificates": "featureGates.RotateKubeletServerCertificate", + "kubelet.streaming-connection-idle-timeout": "streamingConnectionIdleTimeout", + "kubelet.tls-cert-file": "tlsCertFile", + "kubelet.tls-cipher-suites": "tlsCipherSuites", + "kubelet.tls-min-version": "tlsMinVersion", + "kubelet.tls-private-key-file": "tlsPrivateKeyFile", + + "kube-scheduler.bind-address": "", + "kube-scheduler.profiling": "enableProfiling", + "kube-scheduler.contention-profiling": "enableContentionProfiling", + "kube-scheduler.requestheader-extra-headers-prefix": "", + "kube-scheduler.requestheader-group-headers": "", + "kube-scheduler.requestheader-username-headers": "", + "kube-scheduler.secure-port": "", + + "kube-proxy.bind-address": "", + } ) const preamble = `// Unless explicitly stated otherwise all files in 
this repository are licensed @@ -144,7 +181,7 @@ const preamble = `// Unless explicitly stated otherwise all files in this reposi // Copyright 2016-present Datadog, Inc. // !!! -// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator.go +// This is a generated file: regenerate with go run ./pkg/compliance/tools/k8s_types_generator/main.go // !!! //revive:disable package k8sconfig @@ -200,7 +237,6 @@ func main() { } mergedKomp := unionKomponents(komponents...) allKomponents = append(allKomponents, mergedKomp) - fmt.Println(printKomponentCode(mergedKomp)) } { @@ -211,9 +247,20 @@ func main() { } mergedKomp := unionKomponents(komponents...) allKomponents = append(allKomponents, mergedKomp) - fmt.Println(printKomponentCode(mergedKomp)) } + checkForKnownFlags(allKomponents, knownFlags) + for _, komp := range allKomponents { + sort.Slice(komp.confs, func(i, j int) bool { + ii := slices.Index(knownFlags, "--"+komp.confs[i].flagName) + jj := slices.Index(knownFlags, "--"+komp.confs[j].flagName) + return ii < jj + }) + fmt.Println(printKomponentCode(komp)) + } +} + +func checkForKnownFlags(allKomponents []*komponent, knownFlags []string) { var knownFlagsClone []string knownFlagsClone = append(knownFlagsClone, knownFlags...) 
for _, komponent := range allKomponents { @@ -354,9 +401,17 @@ func unionKomponents(ks ...*komponent) *komponent { confs = append(confs, newConf) conf = newConf } else { - if conf.flagType != newConf.flagType { + if isKnownFlag(conf.flagName) && conf.flagType != newConf.flagType { + fmt.Fprintf(os.Stderr, "%s %s != %s\n", conf.flagName, conf.flagType, newConf.flagType) panic("TODO: different types across versions") } + if isKnownFlag(conf.flagName) && conf.flagDefault != newConf.flagDefault { + // special case for these flags for which the value itself is not important + if conf.flagName != "event-burst" && conf.flagName != "event-qps" { + fmt.Fprintf(os.Stderr, "%s %s != %s\n", conf.flagName, conf.flagDefault, newConf.flagDefault) + panic("TODO: different defaults across versions") + } + } } conf.versions = append(conf.versions, k.version) } @@ -375,7 +430,7 @@ func printKomponentCode(komp *komponent) string { printAssignment := func(c *conf, v string) string { switch c.goType { case "string", "ip": - return fmt.Sprintf("res.%s = %s", toGoField(c.flagName), v) + return fmt.Sprintf("v := %s\nres.%s = &v", v, toGoField(c.flagName)) case "bool": return fmt.Sprintf("res.%s = l.parseBool(%s)", toGoField(c.flagName), v) case "float64": @@ -420,8 +475,12 @@ func printKomponentCode(komp *komponent) string { if !isKnownFlag(c.flagName) { continue } - s += fmt.Sprintf(" %s %s `json:\"%s\"` // versions: %s\n", - toGoField(c.flagName), c.goType, toGoJSONTag(c.flagName), strings.Join(c.versions, ", ")) + goType := c.goType + if !strings.HasPrefix(goType, "*") && !strings.HasPrefix(goType, "[]") { + goType = "*" + goType + } + s += fmt.Sprintf(" %s %s `json:\"%s,omitempty\"` // versions: %s\n", + toGoField(c.flagName), goType, toGoJSONTag(c.flagName), strings.Join(c.versions, ", ")) } s += " SkippedFlags map[string]string `json:\"skippedFlags,omitempty\"`\n" s += "}\n" @@ -434,9 +493,26 @@ func printKomponentCode(komp *komponent) string { } s += fmt.Sprintf("if v, ok := 
flags[\"--%s\"]; ok {\n", c.flagName) s += fmt.Sprintf("delete(flags, \"--%s\")\n", c.flagName) - s += printAssignment(c, "v") + s += printAssignment(c, "v") + "\n" if c.flagDefault != "" { - s += "\n} else {\n" + // kube-apiserver and etcd components do not have any configuration file. + if komp.name != "kube-apiserver" && komp.name != "etcd" && komp.name != "kube-controller-manager" { + configCursor, ok := cliConfAssoc[komp.name+"."+c.flagName] + // Some components can be configured with both cli-args and a + // configuration file. We need to make sure the default value + // of a cli-args is filled only if the associated + // configuration in the config file is not setup. + if !ok { + panic(fmt.Errorf("missing %s configuration associated path to flag %q (default = %q)", komp.name, c.flagName, c.flagDefault)) + } + if configCursor != "" { + s += fmt.Sprintf("\n} else if !l.configFileMetaHasField(res.Config, %q) {\n", configCursor) + } else { + s += "\n} else {\n" + } + } else { + s += "\n} else {\n" + } s += printAssignment(c, fmt.Sprintf("%q", c.flagDefault)) } s += "}\n" @@ -663,7 +739,7 @@ func scanDefaultValue(str string, op, cl rune) string { func parseTypeBool(str string) string { if str == "" { - str = "false" + return "" } b, err := strconv.ParseBool(str) if err != nil { @@ -672,7 +748,7 @@ func parseTypeBool(str string) string { if b { return "true" } - return "" + return "false" } func parseTypeCIDRs(str string) string { @@ -690,7 +766,7 @@ func parseTypeCIDRs(str string) string { func parseTypeDuration(str string) string { if str == "" { - str = "0" + return "" } _, err := time.ParseDuration(str) if err != nil { @@ -701,7 +777,7 @@ func parseTypeDuration(str string) string { func parseTypeFloat(str string) string { if str == "" { - str = "0.0" + return "" } _, err := strconv.ParseFloat(str, 64) if err != nil { @@ -712,7 +788,7 @@ func parseTypeFloat(str string) string { func parseTypeNumber(str string) string { if str == "" { - str = "0" + return "" } 
_, err := strconv.Atoi(str) if err != nil { diff --git a/pkg/config/aliases.go b/pkg/config/aliases.go index ccdebd7d6a509..f1b18c6cb263f 100644 --- a/pkg/config/aliases.go +++ b/pkg/config/aliases.go @@ -3,14 +3,20 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. +// Package config defines the configuration of the agent package config import ( + "context" + slog "github.com/cihub/seelog" + "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/logs" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/optional" ) // Aliases to conf package @@ -72,6 +78,13 @@ const ( KubeOrchestratorExplorer = env.KubeOrchestratorExplorer ) +var ( + // Datadog Alias + Datadog = pkgconfigsetup.Datadog + // SystemProbe Alias + SystemProbe = pkgconfigsetup.SystemProbe +) + // IsAutoconfigEnabled is alias for model.IsAutoconfigEnabled func IsAutoconfigEnabled() bool { return env.IsAutoconfigEnabled(Datadog) @@ -79,10 +92,9 @@ func IsAutoconfigEnabled() bool { // Aliases for config overrides var ( - AddOverride = model.AddOverride - AddOverrides = model.AddOverrides - AddOverrideFunc = model.AddOverrideFunc - applyOverrideFuncs = model.ApplyOverrideFuncs + AddOverride = model.AddOverride + AddOverrides = model.AddOverrides + AddOverrideFunc = model.AddOverrideFunc ) // LoggerName Alias @@ -113,3 +125,141 @@ func GetSyslogURI() string { func SetupDogstatsdLogger(logFile string) (slog.LoggerInterface, error) { return logs.SetupDogstatsdLogger(logFile, Datadog) } + +// IsCloudProviderEnabled Alias using Datadog config +func IsCloudProviderEnabled(cloudProvider string) bool { + return pkgconfigsetup.IsCloudProviderEnabled(cloudProvider, Datadog) +} + +// GetIPCAddress Alias using Datadog config +func 
GetIPCAddress() (string, error) { + return pkgconfigsetup.GetIPCAddress(Datadog) +} + +// Datatype Aliases +const ( + Metrics = pkgconfigsetup.Metrics + Traces = pkgconfigsetup.Traces + Logs = pkgconfigsetup.Logs +) + +// Aliases for config defaults +const ( + DefaultForwarderRecoveryInterval = pkgconfigsetup.DefaultForwarderRecoveryInterval + DefaultAPIKeyValidationInterval = pkgconfigsetup.DefaultAPIKeyValidationInterval + DefaultBatchWait = pkgconfigsetup.DefaultBatchWait + DefaultInputChanSize = pkgconfigsetup.DefaultInputChanSize + DefaultBatchMaxConcurrentSend = pkgconfigsetup.DefaultBatchMaxConcurrentSend + DefaultBatchMaxContentSize = pkgconfigsetup.DefaultBatchMaxContentSize + DefaultLogsSenderBackoffRecoveryInterval = pkgconfigsetup.DefaultLogsSenderBackoffRecoveryInterval + DefaultLogsSenderBackoffMax = pkgconfigsetup.DefaultLogsSenderBackoffMax + DefaultLogsSenderBackoffFactor = pkgconfigsetup.DefaultLogsSenderBackoffFactor + DefaultLogsSenderBackoffBase = pkgconfigsetup.DefaultLogsSenderBackoffBase + DefaultBatchMaxSize = pkgconfigsetup.DefaultBatchMaxSize + DefaultNumWorkers = pkgconfigsetup.DefaultNumWorkers + MaxNumWorkers = pkgconfigsetup.MaxNumWorkers + DefaultSite = pkgconfigsetup.DefaultSite + OTLPTracePort = pkgconfigsetup.OTLPTracePort + DefaultAuditorTTL = pkgconfigsetup.DefaultAuditorTTL + DefaultMaxMessageSizeBytes = pkgconfigsetup.DefaultMaxMessageSizeBytes + DefaultProcessEntityStreamPort = pkgconfigsetup.DefaultProcessEntityStreamPort + DefaultProcessEventsCheckInterval = pkgconfigsetup.DefaultProcessEventsCheckInterval + DefaultProcessEventsMinCheckInterval = pkgconfigsetup.DefaultProcessEventsMinCheckInterval + ProcessMaxPerMessageLimit = pkgconfigsetup.ProcessMaxPerMessageLimit + DefaultProcessMaxPerMessage = pkgconfigsetup.DefaultProcessMaxPerMessage + ProcessMaxMessageBytesLimit = pkgconfigsetup.ProcessMaxMessageBytesLimit + DefaultProcessDiscoveryHintFrequency = pkgconfigsetup.DefaultProcessDiscoveryHintFrequency + 
DefaultProcessMaxMessageBytes = pkgconfigsetup.DefaultProcessMaxMessageBytes + DefaultProcessExpVarPort = pkgconfigsetup.DefaultProcessExpVarPort + DefaultProcessQueueBytes = pkgconfigsetup.DefaultProcessQueueBytes + DefaultProcessQueueSize = pkgconfigsetup.DefaultProcessQueueSize + DefaultProcessRTQueueSize = pkgconfigsetup.DefaultProcessRTQueueSize + DefaultRuntimePoliciesDir = pkgconfigsetup.DefaultRuntimePoliciesDir + DefaultGRPCConnectionTimeoutSecs = pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs + DefaultProcessEndpoint = pkgconfigsetup.DefaultProcessEndpoint + DefaultProcessEventsEndpoint = pkgconfigsetup.DefaultProcessEventsEndpoint +) + +type ( + // ConfigurationProviders Alias + ConfigurationProviders = pkgconfigsetup.ConfigurationProviders + // Listeners Alias + Listeners = pkgconfigsetup.Listeners + // MappingProfile Alias + MappingProfile = pkgconfigsetup.MappingProfile + // Endpoint Alias + Endpoint = pkgconfigsetup.Endpoint +) + +// GetObsPipelineURL Alias using Datadog config +func GetObsPipelineURL(datatype pkgconfigsetup.DataType) (string, error) { + return pkgconfigsetup.GetObsPipelineURL(datatype, Datadog) +} + +// LoadCustom Alias +func LoadCustom(config model.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*model.Warnings, error) { + return pkgconfigsetup.LoadCustom(config, origin, secretResolver, additionalKnownEnvVars) +} + +// LoadDatadogCustom Alias +func LoadDatadogCustom(config model.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*model.Warnings, error) { + return pkgconfigsetup.LoadDatadogCustom(config, origin, secretResolver, additionalKnownEnvVars) +} + +// GetValidHostAliases Alias using Datadog config +func GetValidHostAliases(ctx context.Context) ([]string, error) { + return pkgconfigsetup.GetValidHostAliases(ctx, Datadog) +} + +// IsCLCRunner Alias using Datadog config +func IsCLCRunner() bool { + return 
pkgconfigsetup.IsCLCRunner(Datadog) +} + +// GetBindHostFromConfig Alias using Datadog config +func GetBindHostFromConfig(config model.Reader) string { + return pkgconfigsetup.GetBindHostFromConfig(config) +} + +// GetBindHost Alias using Datadog config +func GetBindHost() string { + return pkgconfigsetup.GetBindHost(Datadog) +} + +// GetDogstatsdMappingProfiles Alias using Datadog config +func GetDogstatsdMappingProfiles() ([]MappingProfile, error) { + return pkgconfigsetup.GetDogstatsdMappingProfiles(Datadog) +} + +var ( + // IsRemoteConfigEnabled Alias + IsRemoteConfigEnabled = pkgconfigsetup.IsRemoteConfigEnabled + // StartTime Alias + StartTime = pkgconfigsetup.StartTime + // StandardJMXIntegrations Alias + StandardJMXIntegrations = pkgconfigsetup.StandardJMXIntegrations + // SetupOTLP Alias + SetupOTLP = pkgconfigsetup.OTLP + // InitSystemProbeConfig Alias + InitSystemProbeConfig = pkgconfigsetup.InitSystemProbeConfig + // InitConfig Alias + InitConfig = pkgconfigsetup.InitConfig + + // GetRemoteConfigurationAllowedIntegrations Alias + GetRemoteConfigurationAllowedIntegrations = pkgconfigsetup.GetRemoteConfigurationAllowedIntegrations + // LoadProxyFromEnv Alias + LoadProxyFromEnv = pkgconfigsetup.LoadProxyFromEnv + + // GetIPCPort Alias + GetIPCPort = pkgconfigsetup.GetIPCPort +) + +// LoadWithoutSecret Alias using Datadog config +func LoadWithoutSecret() (*model.Warnings, error) { + return pkgconfigsetup.LoadDatadogCustom(Datadog, "datadog.yaml", optional.NewNoneOption[secrets.Component](), SystemProbe.GetEnvVars()) +} + +// GetProcessAPIAddressPort Alias using Datadog config +func GetProcessAPIAddressPort() (string, error) { + return pkgconfigsetup.GetProcessAPIAddressPort(Datadog) +} diff --git a/pkg/config/aliases_darwin.go b/pkg/config/aliases_darwin.go new file mode 100644 index 0000000000000..50065415c35db --- /dev/null +++ b/pkg/config/aliases_darwin.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed 
+// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +import ( + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +// Aliases to setup package +const ( + DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile + DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile + DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin +) diff --git a/pkg/config/aliases_nix.go b/pkg/config/aliases_nix.go new file mode 100644 index 0000000000000..bc1edec08ea71 --- /dev/null +++ b/pkg/config/aliases_nix.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux || freebsd || netbsd || openbsd || solaris || dragonfly || aix + +package config + +import ( + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +// Aliases to setup package +const ( + DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile + DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile + DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin +) diff --git a/pkg/config/aliases_windows.go b/pkg/config/aliases_windows.go new file mode 100644 index 0000000000000..eccac48a83b76 --- /dev/null +++ b/pkg/config/aliases_windows.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package config + +import ( + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +// Aliases to setup package +var ( + DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile + DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile + DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin +) diff --git a/pkg/config/consts.go b/pkg/config/consts.go new file mode 100644 index 0000000000000..29200ba9132b4 --- /dev/null +++ b/pkg/config/consts.go @@ -0,0 +1,11 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package config + +const ( + // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache + ClusterIDCacheKey = "orchestratorClusterID" +) diff --git a/pkg/config/legacy_converter.go b/pkg/config/legacy_converter.go index 62c776cee5e64..b255a43f16862 100644 --- a/pkg/config/legacy_converter.go +++ b/pkg/config/legacy_converter.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // LegacyConfigConverter is used in the legacy package @@ -27,6 +28,6 @@ func NewConfigConverter() *LegacyConfigConverter { // Configure Datadog global configuration Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // Configuration defaults - InitConfig(Datadog) + pkgconfigsetup.InitConfig(Datadog) return &LegacyConfigConverter{Datadog} } diff --git a/pkg/config/mock.go b/pkg/config/mock.go index 36dcd26993e6c..e42c7c0fb6772 100644 --- a/pkg/config/mock.go +++ b/pkg/config/mock.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( @@ -60,7 +61,7 @@ func Mock(t 
testing.TB) *MockConfig { // Configure Datadog global configuration Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // Configuration defaults - InitConfig(Datadog) + pkgconfigsetup.InitConfig(Datadog) return &MockConfig{Datadog} } @@ -90,6 +91,6 @@ func MockSystemProbe(t testing.TB) *MockConfig { // Configure Datadog global configuration SystemProbe = NewConfig("system-probe", "DD", strings.NewReplacer(".", "_")) // Configuration defaults - InitSystemProbeConfig(SystemProbe) + pkgconfigsetup.InitSystemProbeConfig(SystemProbe) return &MockConfig{SystemProbe} } diff --git a/pkg/config/model/types.go b/pkg/config/model/types.go index a5349de7b6655..15dd503af971e 100644 --- a/pkg/config/model/types.go +++ b/pkg/config/model/types.go @@ -22,6 +22,11 @@ type Proxy struct { NoProxy []string `mapstructure:"no_proxy"` } +// NotificationReceiver represents the callback type to receive notifications each time the `Set` method is called. The +// configuration will call each NotificationReceiver registered through the 'OnUpdate' method, therefore +// 'NotificationReceiver' should not be blocking. +type NotificationReceiver func(key string) + // Reader is a subset of Config that only allows reading of configuration type Reader interface { Get(key string) interface{} @@ -77,6 +82,10 @@ type Reader interface { // Object returns Reader to config (completes config.Component interface) Object() Reader + + // OnUpdate adds a callback to the list receivers to be called each time a value is change in the configuration + // by a call to the 'Set' method. The configuration will sequentially call each receiver. 
+ OnUpdate(callback NotificationReceiver) } // Writer is a subset of Config that only allows writing the configuration diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go index 4dac9baba20e6..e0eae02686420 100644 --- a/pkg/config/model/viper.go +++ b/pkg/config/model/viper.go @@ -63,6 +63,8 @@ type safeConfig struct { envPrefix string envKeyReplacer *strings.Replacer + notificationReceivers []NotificationReceiver + // Proxy settings proxiesOnce sync.Once proxies *Proxy @@ -72,6 +74,14 @@ type safeConfig struct { configEnvVars map[string]struct{} } +// OnUpdate adds a callback to the list receivers to be called each time a value is change in the configuration +// by a call to the 'Set' method. +func (c *safeConfig) OnUpdate(callback NotificationReceiver) { + c.Lock() + defer c.Unlock() + c.notificationReceivers = append(c.notificationReceivers, callback) +} + // Set wraps Viper for concurrent access func (c *safeConfig) Set(key string, value interface{}, source Source) { if source == SourceDefault { @@ -83,6 +93,11 @@ func (c *safeConfig) Set(key string, value interface{}, source Source) { defer c.Unlock() c.configSources[source].Set(key, value) c.mergeViperInstances(key) + + // notifying all receiver about the updated setting + for _, receiver := range c.notificationReceivers { + receiver(key) + } } // SetWithoutSource sets the given value using source Unknown diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go index 1c3b4c93fe2e6..073d2e5a6c41c 100644 --- a/pkg/config/model/viper_test.go +++ b/pkg/config/model/viper_test.go @@ -262,3 +262,22 @@ foo: bar assert.Equal(t, SourceFile, config.GetSource("foo")) assert.Equal(t, map[string]interface{}{"foo": "bar"}, config.AllSourceSettingsWithoutDefault(SourceFile)) } + +func TestNotification(t *testing.T) { + config := NewConfig("test", "DD", strings.NewReplacer(".", "_")) + + updatedKeyCB1 := []string{} + updatedKeyCB2 := []string{} + + config.OnUpdate(func(key string) { 
updatedKeyCB1 = append(updatedKeyCB1, key) }) + + config.Set("foo", "bar", SourceFile) + assert.Equal(t, []string{"foo"}, updatedKeyCB1) + + config.OnUpdate(func(key string) { updatedKeyCB2 = append(updatedKeyCB2, key) }) + + config.Set("foo", "bar2", SourceFile) + config.Set("foo2", "bar2", SourceFile) + assert.Equal(t, []string{"foo", "foo", "foo2"}, updatedKeyCB1) + assert.Equal(t, []string{"foo", "foo2"}, updatedKeyCB2) +} diff --git a/pkg/config/settings/runtime_setting_log_level.go b/pkg/config/settings/runtime_setting_log_level.go index 99532cdd8e96f..cf5103ad1ac3d 100644 --- a/pkg/config/settings/runtime_setting_log_level.go +++ b/pkg/config/settings/runtime_setting_log_level.go @@ -6,7 +6,6 @@ package settings import ( - "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/pkg/config" pkgconfiglogs "github.com/DataDog/datadog-agent/pkg/config/logs" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -17,16 +16,12 @@ import ( type LogLevelRuntimeSetting struct { Config config.ReaderWriter ConfigKey string - // invAgent is a temporary dependency until the configuration is capable of sending it's own notification upon - // a value being set. - invAgent inventoryagent.Component } // NewLogLevelRuntimeSetting returns a new LogLevelRuntimeSetting -func NewLogLevelRuntimeSetting(invAgent inventoryagent.Component) *LogLevelRuntimeSetting { +func NewLogLevelRuntimeSetting() *LogLevelRuntimeSetting { return &LogLevelRuntimeSetting{ ConfigKey: "log_level", - invAgent: invAgent, } } @@ -72,9 +67,5 @@ func (l *LogLevelRuntimeSetting) Set(v interface{}, source model.Source) error { cfg = l.Config } cfg.Set(key, level, source) - // we trigger a new inventory metadata payload since the configuration was updated by the user. 
- if l.invAgent != nil { - l.invAgent.Refresh() - } return nil } diff --git a/pkg/config/apm.go b/pkg/config/setup/apm.go similarity index 99% rename from pkg/config/apm.go rename to pkg/config/setup/apm.go index 8dcfc4fe164f5..652ac11c05e80 100644 --- a/pkg/config/apm.go +++ b/pkg/config/setup/apm.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "encoding/csv" @@ -13,13 +13,14 @@ import ( "strconv" "strings" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) // Traces specifies the data type used for Vector override. See https://vector.dev/docs/reference/configuration/sources/datadog_agent/ for additional details. const Traces DataType = "traces" -func setupAPM(config Config) { +func setupAPM(config pkgconfigmodel.Config) { config.BindEnv("apm_config.obfuscation.elasticsearch.enabled", "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED") config.BindEnv("apm_config.obfuscation.elasticsearch.keep_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES") config.BindEnv("apm_config.obfuscation.elasticsearch.obfuscate_sql_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES") diff --git a/pkg/config/config.go b/pkg/config/setup/config.go similarity index 95% rename from pkg/config/config.go rename to pkg/config/setup/config.go index 74dca35f649dc..38ad099507139 100644 --- a/pkg/config/config.go +++ b/pkg/config/setup/config.go @@ -3,8 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-// Package config defines the configuration of the agent -package config +// Package setup defines the configuration of the agent +package setup import ( "context" @@ -20,16 +20,14 @@ import ( "strings" "time" - "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" - "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/optional" + "gopkg.in/yaml.v2" ) const ( @@ -69,9 +67,6 @@ const ( // DefaultAuditorTTL is the default logs auditor TTL in hours DefaultAuditorTTL = 23 - // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache - ClusterIDCacheKey = "orchestratorClusterID" - // DefaultRuntimePoliciesDir is the default policies directory used by the runtime security module DefaultRuntimePoliciesDir = "/etc/datadog-agent/runtime-security.d" @@ -101,8 +96,8 @@ const ( // Datadog is the global configuration object var ( - Datadog Config - SystemProbe Config + Datadog pkgconfigmodel.Config + SystemProbe pkgconfigmodel.Config ) // Variables to initialize at build time @@ -199,15 +194,15 @@ const ( func init() { osinit() // Configure Datadog global configuration - Datadog = NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) - SystemProbe = NewConfig("system-probe", "DD", strings.NewReplacer(".", "_")) + Datadog = pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + SystemProbe = pkgconfigmodel.NewConfig("system-probe", "DD", strings.NewReplacer(".", "_")) // Configuration defaults InitConfig(Datadog) InitSystemProbeConfig(SystemProbe) } // InitConfig initializes the config defaults on a config -func InitConfig(config Config) { +func 
InitConfig(config pkgconfigmodel.Config) { // Agent // Don't set a default on 'site' to allow detecting with viper whether it's set in config config.BindEnv("site") @@ -257,6 +252,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("enable_metadata_collection", true) config.BindEnvAndSetDefault("enable_gohai", true) config.BindEnvAndSetDefault("check_runners", int64(4)) + config.BindEnvAndSetDefault("check_cancel_timeout", 500*time.Millisecond) config.BindEnvAndSetDefault("auth_token_file_path", "") config.BindEnv("bind_host") config.BindEnvAndSetDefault("health_port", int64(0)) @@ -385,7 +381,7 @@ func InitConfig(config Config) { // Agent GUI access port config.BindEnvAndSetDefault("GUI_port", defaultGuiPort) - if IsContainerized() { + if pkgconfigenv.IsContainerized() { // In serverless-containerized environments (e.g Fargate) // it's impossible to mount host volumes. // Make sure the host paths exist before setting-up the default values. @@ -648,6 +644,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("kubernetes_collect_metadata_tags", true) config.BindEnvAndSetDefault("kubernetes_metadata_tag_update_freq", 60) // Polling frequency of the Agent to the DCA in seconds (gets the local cache if the DCA is disabled) config.BindEnvAndSetDefault("kubernetes_apiserver_client_timeout", 10) + config.BindEnvAndSetDefault("kubernetes_apiserver_informer_client_timeout", 0) config.BindEnvAndSetDefault("kubernetes_map_services_on_ip", false) // temporary opt-out of the new mapping logic config.BindEnvAndSetDefault("kubernetes_apiserver_use_protobuf", false) config.BindEnvAndSetDefault("kubernetes_ad_tags_disabled", []string{}) @@ -997,7 +994,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("external_metrics_provider.local_copy_refresh_rate", 30) // value in seconds config.BindEnvAndSetDefault("external_metrics_provider.chunk_size", 35) // Maximum number of queries to batch when querying Datadog. 
config.BindEnvAndSetDefault("external_metrics_provider.split_batches_with_backoff", false) // Splits batches and runs queries with errors individually with an exponential backoff - AddOverrideFunc(sanitizeExternalMetricsProviderChunkSize) + pkgconfigmodel.AddOverrideFunc(sanitizeExternalMetricsProviderChunkSize) // Cluster check Autodiscovery config.BindEnvAndSetDefault("cluster_checks.enabled", false) config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds @@ -1009,6 +1006,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("cluster_checks.rebalance_min_percentage_improvement", 10) // Experimental. Subject to change. Rebalance only if the distribution found improves the current one by this. config.BindEnvAndSetDefault("cluster_checks.clc_runners_port", 5005) config.BindEnvAndSetDefault("cluster_checks.exclude_checks", []string{}) + config.BindEnvAndSetDefault("cluster_checks.exclude_checks_from_dispatching", []string{}) // Cluster check runner config.BindEnvAndSetDefault("clc_runner_enabled", false) @@ -1269,12 +1267,12 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("language_detection.client_period", "10s") setupAPM(config) - SetupOTLP(config) + OTLP(config) setupProcesses(config) } // LoadProxyFromEnv overrides the proxy settings with environment variables -func LoadProxyFromEnv(config Config) { +func LoadProxyFromEnv(config pkgconfigmodel.Config) { // Viper doesn't handle mixing nested variables from files and set // manually. 
If we manually set one of the sub value for "proxy" all // other values from the conf file will be shadowed when using @@ -1308,7 +1306,7 @@ func LoadProxyFromEnv(config Config) { } var isSet bool - p := &Proxy{} + p := &pkgconfigmodel.Proxy{} if isSet = config.IsSet("proxy"); isSet { if err := config.UnmarshalKey("proxy", p); err != nil { isSet = false @@ -1367,20 +1365,20 @@ func LoadProxyFromEnv(config Config) { } // LoadWithoutSecret reads configs files, initializes the config module without decrypting any secrets -func LoadWithoutSecret() (*Warnings, error) { - return LoadDatadogCustom(Datadog, "datadog.yaml", optional.NewNoneOption[secrets.Component](), SystemProbe.GetEnvVars()) +func LoadWithoutSecret(config pkgconfigmodel.Config, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) { + return LoadDatadogCustom(config, "datadog.yaml", optional.NewNoneOption[secrets.Component](), additionalEnvVars) } // LoadWithSecret reads config files and initializes config with decrypted secrets -func LoadWithSecret(secretResolver secrets.Component) (*Warnings, error) { - return LoadDatadogCustom(Datadog, "datadog.yaml", optional.NewOption[secrets.Component](secretResolver), SystemProbe.GetEnvVars()) +func LoadWithSecret(config pkgconfigmodel.Config, secretResolver secrets.Component, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) { + return LoadDatadogCustom(config, "datadog.yaml", optional.NewOption[secrets.Component](secretResolver), additionalEnvVars) } // Merge will merge additional configuration into an existing configuration -func Merge(configPaths []string) error { +func Merge(configPaths []string, config pkgconfigmodel.Config) error { for _, configPath := range configPaths { if f, err := os.Open(configPath); err == nil { - err = Datadog.MergeConfig(f) + err = config.MergeConfig(f) _ = f.Close() if err != nil { return fmt.Errorf("error merging %s config file: %w", configPath, err) @@ -1393,7 +1391,7 @@ func Merge(configPaths []string) error { 
return nil } -func findUnknownKeys(config Config) []string { +func findUnknownKeys(config pkgconfigmodel.Config) []string { var unknownKeys []string knownKeys := config.GetKnownKeys() loadedKeys := config.AllKeys() @@ -1416,7 +1414,7 @@ func findUnknownKeys(config Config) []string { return unknownKeys } -func findUnexpectedUnicode(config Config) []string { +func findUnexpectedUnicode(config pkgconfigmodel.Config) []string { messages := make([]string, 0) checkAndRecordString := func(str string, prefix string) { if res := FindUnexpectedUnicode(str); len(res) != 0 { @@ -1454,7 +1452,7 @@ func findUnexpectedUnicode(config Config) []string { return messages } -func findUnknownEnvVars(config Config, environ []string, additionalKnownEnvVars []string) []string { +func findUnknownEnvVars(config pkgconfigmodel.Config, environ []string, additionalKnownEnvVars []string) []string { var unknownVars []string knownVars := map[string]struct{}{ @@ -1500,8 +1498,8 @@ func findUnknownEnvVars(config Config, environ []string, additionalKnownEnvVars return unknownVars } -func useHostEtc(config Config) { - if IsContainerized() && pathExists("/host/etc") { +func useHostEtc(config pkgconfigmodel.Config) { + if pkgconfigenv.IsContainerized() && pathExists("/host/etc") { if !config.GetBool("ignore_host_etc") { if val, isSet := os.LookupEnv("HOST_ETC"); !isSet { // We want to detect the host distro informations instead of the one from the container. 
@@ -1518,7 +1516,7 @@ func useHostEtc(config Config) { } } -func checkConflictingOptions(config Config) error { +func checkConflictingOptions(config pkgconfigmodel.Config) error { // Verify that either use_podman_logs OR docker_path_override are set since they conflict if config.GetBool("logs_config.use_podman_logs") && len(config.GetString("logs_config.docker_path_override")) > 0 { log.Warnf("'use_podman_logs' is set to true and 'docker_path_override' is set, please use one or the other") @@ -1529,7 +1527,7 @@ func checkConflictingOptions(config Config) error { } // LoadDatadogCustom loads the datadog config in the given config -func LoadDatadogCustom(config Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*Warnings, error) { +func LoadDatadogCustom(config pkgconfigmodel.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) { // Feature detection running in a defer func as it always need to run (whether config load has been successful or not) // Because some Agents (e.g. 
trace-agent) will run even if config file does not exist defer func() { @@ -1562,7 +1560,7 @@ func LoadDatadogCustom(config Config, origin string, secretResolver optional.Opt log.Warnf("Python version has been forced to %s", DefaultPython) } - AddOverride("python_version", DefaultPython) + pkgconfigmodel.AddOverride("python_version", DefaultPython) } sanitizeAPIKeyConfig(config, "api_key") @@ -1574,11 +1572,11 @@ func LoadDatadogCustom(config Config, origin string, secretResolver optional.Opt } // LoadCustom reads config into the provided config object -func LoadCustom(config Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*Warnings, error) { - warnings := Warnings{} +func LoadCustom(config pkgconfigmodel.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) { + warnings := pkgconfigmodel.Warnings{} if err := config.ReadInConfig(); err != nil { - if IsServerless() { + if pkgconfigenv.IsServerless() { log.Debug("No config file detected, using environment variable based configuration only") // Proxy settings need to be loaded from environment variables even in the absence of a datadog.yaml file // The remaining code in LoadCustom is not run to keep a low cold start time @@ -1622,7 +1620,7 @@ func LoadCustom(config Config, origin string, secretResolver optional.Option[sec // setupFipsEndpoints overwrites the Agent endpoint for outgoing data to be sent to the local FIPS proxy. The local FIPS // proxy will be in charge of forwarding data to the Datadog backend following FIPS standard. Starting from // fips.port_range_start we will assign a dedicated port per product (metrics, logs, traces, ...). 
-func setupFipsEndpoints(config Config) error { +func setupFipsEndpoints(config pkgconfigmodel.Config) error { // Each port is dedicated to a specific data type: // // port_range_start: HAProxy stats @@ -1736,7 +1734,7 @@ func setupFipsEndpoints(config Config) error { return nil } -func setupFipsLogsConfig(config Config, configPrefix string, url string) { +func setupFipsLogsConfig(config pkgconfigmodel.Config, configPrefix string, url string) { config.Set(configPrefix+"use_http", true, pkgconfigmodel.SourceAgentRuntime) config.Set(configPrefix+"logs_no_ssl", !config.GetBool("fips.https"), pkgconfigmodel.SourceAgentRuntime) config.Set(configPrefix+"logs_dd_url", url, pkgconfigmodel.SourceAgentRuntime) @@ -1745,7 +1743,7 @@ func setupFipsLogsConfig(config Config, configPrefix string, url string) { // ResolveSecrets merges all the secret values from origin into config. Secret values // are identified by a value of the form "ENC[key]" where key is the secret key. // See: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md -func ResolveSecrets(config Config, secretResolver secrets.Component, origin string) error { +func ResolveSecrets(config pkgconfigmodel.Config, secretResolver secrets.Component, origin string) error { // We have to init the secrets package before we can use it to decrypt // anything. secretResolver.Configure( @@ -1808,7 +1806,7 @@ func EnvVarAreSetAndNotEqual(lhsName string, rhsName string) bool { } // sanitizeAPIKeyConfig strips newlines and other control characters from a given key. 
-func sanitizeAPIKeyConfig(config Config, key string) { +func sanitizeAPIKeyConfig(config pkgconfigmodel.Config, key string) { if !config.IsKnown(key) || !config.IsSet(key) { return } @@ -1816,7 +1814,7 @@ func sanitizeAPIKeyConfig(config Config, key string) { } // sanitizeExternalMetricsProviderChunkSize ensures the value of `external_metrics_provider.chunk_size` is within an acceptable range -func sanitizeExternalMetricsProviderChunkSize(config Config) { +func sanitizeExternalMetricsProviderChunkSize(config pkgconfigmodel.Config) { if !config.IsKnown("external_metrics_provider.chunk_size") { return } @@ -1832,7 +1830,7 @@ func sanitizeExternalMetricsProviderChunkSize(config Config) { } } -func bindEnvAndSetLogsConfigKeys(config Config, prefix string) { +func bindEnvAndSetLogsConfigKeys(config pkgconfigmodel.Config, prefix string) { config.BindEnv(prefix + "logs_dd_url") // Send the logs to a proxy. Must respect format ':' and '' to be an integer config.BindEnv(prefix + "dd_url") config.BindEnv(prefix + "additional_endpoints") @@ -1856,8 +1854,8 @@ func bindEnvAndSetLogsConfigKeys(config Config, prefix string) { // IsCloudProviderEnabled checks the cloud provider family provided in // pkg/util/.go against the value for cloud_provider: on the // global config object Datadog -func IsCloudProviderEnabled(cloudProviderName string) bool { - cloudProviderFromConfig := Datadog.GetStringSlice("cloud_provider_metadata") +func IsCloudProviderEnabled(cloudProviderName string, config pkgconfigmodel.Reader) bool { + cloudProviderFromConfig := config.GetStringSlice("cloud_provider_metadata") for _, cloudName := range cloudProviderFromConfig { if strings.EqualFold(cloudName, cloudProviderName) { @@ -1882,7 +1880,7 @@ func pathExists(path string) bool { // setTracemallocEnabled is a helper to get the effective tracemalloc // configuration. 
-func setTracemallocEnabled(config Config) bool { +func setTracemallocEnabled(config pkgconfigmodel.Config) bool { if !config.IsKnown("tracemalloc_debug") { return false } @@ -1903,7 +1901,7 @@ func setTracemallocEnabled(config Config) bool { // setNumWorkers is a helper to set the effective number of workers for // a given config. -func setNumWorkers(config Config) { +func setNumWorkers(config pkgconfigmodel.Config) { if !config.IsKnown("check_runners") { return } @@ -1920,11 +1918,11 @@ func setNumWorkers(config Config) { } // GetDogstatsdMappingProfiles returns mapping profiles used in DogStatsD mapper -func GetDogstatsdMappingProfiles() ([]MappingProfile, error) { - return getDogstatsdMappingProfilesConfig(Datadog) +func GetDogstatsdMappingProfiles(config pkgconfigmodel.Reader) ([]MappingProfile, error) { + return getDogstatsdMappingProfilesConfig(config) } -func getDogstatsdMappingProfilesConfig(config Config) ([]MappingProfile, error) { +func getDogstatsdMappingProfilesConfig(config pkgconfigmodel.Reader) ([]MappingProfile, error) { var mappings []MappingProfile if config.IsSet("dogstatsd_mapper_profiles") { err := config.UnmarshalKey("dogstatsd_mapper_profiles", &mappings) @@ -1936,17 +1934,17 @@ func getDogstatsdMappingProfilesConfig(config Config) ([]MappingProfile, error) } // IsCLCRunner returns whether the Agent is in cluster check runner mode -func IsCLCRunner() bool { - if !Datadog.GetBool("clc_runner_enabled") { +func IsCLCRunner(config pkgconfigmodel.Reader) bool { + if !config.GetBool("clc_runner_enabled") { return false } var cps []ConfigurationProviders - if err := Datadog.UnmarshalKey("config_providers", &cps); err != nil { + if err := config.UnmarshalKey("config_providers", &cps); err != nil { return false } - for _, name := range Datadog.GetStringSlice("extra_config_providers") { + for _, name := range config.GetStringSlice("extra_config_providers") { cps = append(cps, ConfigurationProviders{Name: name}) } @@ -1968,12 +1966,12 @@ func 
IsCLCRunner() bool { // GetBindHost returns `bind_host` variable or default value // Not using `config.BindEnvAndSetDefault` as some processes need to know // if value was default one or not (e.g. trace-agent) -func GetBindHost() string { - return GetBindHostFromConfig(Datadog) +func GetBindHost(config pkgconfigmodel.Reader) string { + return GetBindHostFromConfig(config) } // GetBindHostFromConfig returns the bind_host value from the config -func GetBindHostFromConfig(cfg Reader) string { +func GetBindHostFromConfig(cfg pkgconfigmodel.Reader) string { if cfg.IsSet("bind_host") { return cfg.GetString("bind_host") } @@ -1982,11 +1980,11 @@ func GetBindHostFromConfig(cfg Reader) string { // GetValidHostAliases validates host aliases set in `host_aliases` variable and returns // only valid ones. -func GetValidHostAliases(_ context.Context) ([]string, error) { - return getValidHostAliasesWithConfig(Datadog), nil +func GetValidHostAliases(_ context.Context, config pkgconfigmodel.Reader) ([]string, error) { + return getValidHostAliasesWithConfig(config), nil } -func getValidHostAliasesWithConfig(config Config) []string { +func getValidHostAliasesWithConfig(config pkgconfigmodel.Reader) []string { aliases := []string{} for _, alias := range config.GetStringSlice("host_aliases") { if err := validate.ValidHostname(alias); err == nil { @@ -1999,7 +1997,7 @@ func getValidHostAliasesWithConfig(config Config) []string { return aliases } -func bindVectorOptions(config Config, datatype DataType) { +func bindVectorOptions(config pkgconfigmodel.Config, datatype DataType) { config.BindEnvAndSetDefault(fmt.Sprintf("observability_pipelines_worker.%s.enabled", datatype), false) config.BindEnvAndSetDefault(fmt.Sprintf("observability_pipelines_worker.%s.url", datatype), "") @@ -2008,19 +2006,19 @@ func bindVectorOptions(config Config, datatype DataType) { } // GetObsPipelineURL returns the URL under the 'observability_pipelines_worker.' 
prefix for the given datatype -func GetObsPipelineURL(datatype DataType) (string, error) { - if Datadog.GetBool(fmt.Sprintf("observability_pipelines_worker.%s.enabled", datatype)) { - return getObsPipelineURLForPrefix(datatype, "observability_pipelines_worker") - } else if Datadog.GetBool(fmt.Sprintf("vector.%s.enabled", datatype)) { +func GetObsPipelineURL(datatype DataType, config pkgconfigmodel.Reader) (string, error) { + if config.GetBool(fmt.Sprintf("observability_pipelines_worker.%s.enabled", datatype)) { + return getObsPipelineURLForPrefix(datatype, "observability_pipelines_worker", config) + } else if config.GetBool(fmt.Sprintf("vector.%s.enabled", datatype)) { // Fallback to the `vector` config if observability_pipelines_worker is not set. - return getObsPipelineURLForPrefix(datatype, "vector") + return getObsPipelineURLForPrefix(datatype, "vector", config) } return "", nil } -func getObsPipelineURLForPrefix(datatype DataType, prefix string) (string, error) { - if Datadog.GetBool(fmt.Sprintf("%s.%s.enabled", prefix, datatype)) { - pipelineURL := Datadog.GetString(fmt.Sprintf("%s.%s.url", prefix, datatype)) +func getObsPipelineURLForPrefix(datatype DataType, prefix string, config pkgconfigmodel.Reader) (string, error) { + if config.GetBool(fmt.Sprintf("%s.%s.enabled", prefix, datatype)) { + pipelineURL := config.GetString(fmt.Sprintf("%s.%s.url", prefix, datatype)) if pipelineURL == "" { log.Errorf("%s.%s.enabled is set to true, but %s.%s.url is empty", prefix, datatype, prefix, datatype) return "", nil @@ -2035,7 +2033,7 @@ func getObsPipelineURLForPrefix(datatype DataType, prefix string) (string, error } // IsRemoteConfigEnabled returns true if Remote Configuration should be enabled -func IsRemoteConfigEnabled(cfg Reader) bool { +func IsRemoteConfigEnabled(cfg pkgconfigmodel.Reader) bool { // Disable Remote Config for GovCloud if cfg.GetBool("fips.enabled") || cfg.GetString("site") == "ddog-gov.com" { return false @@ -2045,7 +2043,7 @@ func 
IsRemoteConfigEnabled(cfg Reader) bool { // GetRemoteConfigurationAllowedIntegrations returns the list of integrations that can be scheduled // with remote-config -func GetRemoteConfigurationAllowedIntegrations(cfg Reader) map[string]bool { +func GetRemoteConfigurationAllowedIntegrations(cfg pkgconfigmodel.Reader) map[string]bool { allowList := cfg.GetStringSlice("remote_configuration.agent_integrations.allow_list") allowMap := map[string]bool{} for _, integration := range allowList { diff --git a/pkg/config/config_darwin.go b/pkg/config/setup/config_darwin.go similarity index 98% rename from pkg/config/config_darwin.go rename to pkg/config/setup/config_darwin.go index d76c126e77e37..7a8272e7e3c3d 100644 --- a/pkg/config/config_darwin.go +++ b/pkg/config/setup/config_darwin.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup const ( defaultConfdPath = "/opt/datadog-agent/etc/conf.d" diff --git a/pkg/config/config_nix.go b/pkg/config/setup/config_nix.go similarity index 98% rename from pkg/config/config_nix.go rename to pkg/config/setup/config_nix.go index 4a9fe00ffb4b7..a07a83c7d7bee 100644 --- a/pkg/config/config_nix.go +++ b/pkg/config/setup/config_nix.go @@ -5,7 +5,7 @@ //go:build linux || freebsd || netbsd || openbsd || solaris || dragonfly || aix -package config +package setup const ( defaultConfdPath = "/etc/datadog-agent/conf.d" diff --git a/pkg/config/config_secret_test.go b/pkg/config/setup/config_secret_test.go similarity index 83% rename from pkg/config/config_secret_test.go rename to pkg/config/setup/config_secret_test.go index 77bbbabcfb7cf..59b08e4d63b22 100644 --- a/pkg/config/config_secret_test.go +++ b/pkg/config/setup/config_secret_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package setup import ( "os" @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -37,14 +38,14 @@ process_config: func TestProxyWithSecret(t *testing.T) { type testCase struct { name string - setup func(t *testing.T, config Config, configPath string, resolver secrets.Mock) - tests func(t *testing.T, config Config) + setup func(t *testing.T, config pkgconfigmodel.Config, configPath string, resolver secrets.Mock) + tests func(t *testing.T, config pkgconfigmodel.Config) } cases := []testCase{ { name: "secrets from configuration for proxy", - setup: func(t *testing.T, config Config, configPath string, resolver secrets.Mock) { + setup: func(t *testing.T, config pkgconfigmodel.Config, configPath string, resolver secrets.Mock) { resolver.SetFetchHookFunc(func(_ []string) (map[string]string, error) { return map[string]string{ "http_handle": "http_url", @@ -59,9 +60,9 @@ func TestProxyWithSecret(t *testing.T) { config.SetWithoutSource("proxy.https", "ENC[https_handle]") config.SetWithoutSource("proxy.no_proxy", []string{"ENC[no_proxy_1_handle]", "ENC[no_proxy_2_handle]"}) }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", NoProxy: []string{"no_proxy_1", "no_proxy_2"}, @@ -71,7 +72,7 @@ func TestProxyWithSecret(t *testing.T) { }, { name: "secrets fron DD env vars for proxy", - setup: func(t *testing.T, config Config, configPath string, resolver secrets.Mock) { + setup: func(t *testing.T, config pkgconfigmodel.Config, configPath string, resolver secrets.Mock) { resolver.SetFetchHookFunc(func(_ []string) (map[string]string, error) 
{ return map[string]string{ "http_handle": "http_url", @@ -86,9 +87,9 @@ func TestProxyWithSecret(t *testing.T) { t.Setenv("DD_PROXY_HTTPS", "ENC[https_handle]") t.Setenv("DD_PROXY_NO_PROXY", "ENC[no_proxy_1_handle] ENC[no_proxy_2_handle]") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", NoProxy: []string{"no_proxy_1", "no_proxy_2"}, @@ -98,7 +99,7 @@ func TestProxyWithSecret(t *testing.T) { }, { name: "secrets fron UNIX env vars for proxy", - setup: func(t *testing.T, config Config, configPath string, resolver secrets.Mock) { + setup: func(t *testing.T, config pkgconfigmodel.Config, configPath string, resolver secrets.Mock) { resolver.SetFetchHookFunc(func(_ []string) (map[string]string, error) { return map[string]string{ "http_handle": "http_url", @@ -113,9 +114,9 @@ func TestProxyWithSecret(t *testing.T) { t.Setenv("HTTPS_PROXY", "ENC[https_handle]") t.Setenv("NO_PROXY", "ENC[no_proxy_1_handle],ENC[no_proxy_2_handle]") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", NoProxy: []string{"no_proxy_1", "no_proxy_2"}, @@ -125,7 +126,7 @@ func TestProxyWithSecret(t *testing.T) { }, { name: "secrets from maps with keys containing dots (ie 'additional_endpoints')", - setup: func(t *testing.T, config Config, configPath string, resolver secrets.Mock) { + setup: func(t *testing.T, config pkgconfigmodel.Config, configPath string, resolver secrets.Mock) { resolver.SetFetchHookFunc(func(_ []string) (map[string]string, error) { return map[string]string{ "api_key_1": "resolved_api_key_1", @@ -135,7 +136,7 @@ func TestProxyWithSecret(t *testing.T) { }) os.WriteFile(configPath, testAdditionalEndpointsConf, 0600) }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, 
config pkgconfigmodel.Config) { expected := map[string][]string{ "https://url1.com": { "resolved_api_key_1", @@ -153,11 +154,10 @@ func TestProxyWithSecret(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - // CircleCI sets NO_PROXY, so unset it for this test unsetEnvForTest(t, "NO_PROXY") - config := SetupConf() + config := Conf() config.SetWithoutSource("use_proxy_for_cloud_metadata", true) // Viper.MergeConfigOverride, which is used when secrets is enabled, will silently fail if a diff --git a/pkg/config/config_test.go b/pkg/config/setup/config_test.go similarity index 82% rename from pkg/config/config_test.go rename to pkg/config/setup/config_test.go index 1bf23ebc17ead..95cab3d41fc64 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/setup/config_test.go @@ -3,7 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +//go:build test + +package setup import ( "fmt" @@ -14,13 +16,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl" - "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func unsetEnvForTest(t *testing.T, env string) { @@ -37,7 +38,7 @@ func unsetEnvForTest(t *testing.T, env string) { } func TestDefaults(t *testing.T) { - config := SetupConf() + config := Conf() // Testing viper's handling of defaults assert.False(t, config.IsSet("site")) @@ -58,7 +59,7 @@ func TestUnexpectedUnicode(t *testing.T) { keyYaml := "api_\u202akey: fakeapikey\n" valueYaml := "api_key: fa\u202akeapikey\n" - testConfig := SetupConfFromYAML(keyYaml) + testConfig := 
ConfFromYAML(keyYaml) warnings := findUnexpectedUnicode(testConfig) require.Len(t, warnings, 1) @@ -66,7 +67,7 @@ func TestUnexpectedUnicode(t *testing.T) { assert.Contains(t, warnings[0], "Configuration key string") assert.Contains(t, warnings[0], "U+202A") - testConfig = SetupConfFromYAML(valueYaml) + testConfig = ConfFromYAML(valueYaml) warnings = findUnexpectedUnicode(testConfig) @@ -77,7 +78,7 @@ func TestUnexpectedUnicode(t *testing.T) { func TestUnexpectedNestedUnicode(t *testing.T) { yaml := "runtime_security_config:\n activity_dump:\n remote_storage:\n endpoints:\n logs_dd_url: \"http://\u202adatadawg.com\"" - testConfig := SetupConfFromYAML(yaml) + testConfig := ConfFromYAML(yaml) warnings := findUnexpectedUnicode(testConfig) require.Len(t, warnings, 1) @@ -109,7 +110,7 @@ func TestUnexpectedWhitespace(t *testing.T) { }, } for _, tc := range tests { - testConfig := SetupConfFromYAML(tc.yaml) + testConfig := ConfFromYAML(tc.yaml) warnings := findUnexpectedUnicode(testConfig) require.Len(t, warnings, 1) @@ -122,14 +123,14 @@ func TestUnknownKeysWarning(t *testing.T) { yamlBase := ` site: datadoghq.eu ` - confBase := SetupConfFromYAML(yamlBase) + confBase := ConfFromYAML(yamlBase) assert.Len(t, findUnknownKeys(confBase), 0) yamlWithUnknownKeys := ` site: datadoghq.eu unknown_key.unknown_subkey: true ` - confWithUnknownKeys := SetupConfFromYAML(yamlWithUnknownKeys) + confWithUnknownKeys := ConfFromYAML(yamlWithUnknownKeys) assert.Len(t, findUnknownKeys(confWithUnknownKeys), 1) confWithUnknownKeys.SetKnown("unknown_key.*") @@ -144,7 +145,7 @@ func TestUnknownVarsWarning(t *testing.T) { if unknown { exp = append(exp, v) } - assert.Equal(t, exp, findUnknownEnvVars(Mock(t), env, additional)) + assert.Equal(t, exp, findUnknownEnvVars(Conf(), env, additional)) } } t.Run("DD_API_KEY", test("DD_API_KEY", false, nil)) @@ -159,59 +160,58 @@ func TestUnknownVarsWarning(t *testing.T) { } func TestDefaultTraceManagedServicesEnvVarValue(t *testing.T) { - testConfig := 
SetupConfFromYAML("") + testConfig := ConfFromYAML("") assert.Equal(t, true, testConfig.Get("serverless.trace_managed_services")) } func TestExplicitFalseTraceManagedServicesEnvVar(t *testing.T) { t.Setenv("DD_TRACE_MANAGED_SERVICES", "false") - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") assert.Equal(t, false, testConfig.Get("serverless.trace_managed_services")) } func TestDDHostnameFileEnvVar(t *testing.T) { t.Setenv("DD_API_KEY", "fakeapikey") t.Setenv("DD_HOSTNAME_FILE", "somefile") - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") assert.Equal(t, "somefile", testConfig.Get("hostname_file")) } func TestIsCloudProviderEnabled(t *testing.T) { - holdValue := Datadog.Get("cloud_provider_metadata") - defer Datadog.SetWithoutSource("cloud_provider_metadata", holdValue) - - Datadog.SetWithoutSource("cloud_provider_metadata", []string{"aws", "gcp", "azure", "alibaba", "tencent"}) - assert.True(t, IsCloudProviderEnabled("AWS")) - assert.True(t, IsCloudProviderEnabled("GCP")) - assert.True(t, IsCloudProviderEnabled("Alibaba")) - assert.True(t, IsCloudProviderEnabled("Azure")) - assert.True(t, IsCloudProviderEnabled("Tencent")) - - Datadog.SetWithoutSource("cloud_provider_metadata", []string{"aws"}) - assert.True(t, IsCloudProviderEnabled("AWS")) - assert.False(t, IsCloudProviderEnabled("GCP")) - assert.False(t, IsCloudProviderEnabled("Alibaba")) - assert.False(t, IsCloudProviderEnabled("Azure")) - assert.False(t, IsCloudProviderEnabled("Tencent")) - - Datadog.SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) - assert.False(t, IsCloudProviderEnabled("AWS")) - assert.False(t, IsCloudProviderEnabled("GCP")) - assert.False(t, IsCloudProviderEnabled("Alibaba")) - assert.False(t, IsCloudProviderEnabled("Azure")) - assert.True(t, IsCloudProviderEnabled("Tencent")) - - Datadog.SetWithoutSource("cloud_provider_metadata", []string{}) - assert.False(t, IsCloudProviderEnabled("AWS")) - assert.False(t, 
IsCloudProviderEnabled("GCP")) - assert.False(t, IsCloudProviderEnabled("Alibaba")) - assert.False(t, IsCloudProviderEnabled("Azure")) - assert.False(t, IsCloudProviderEnabled("Tencent")) + config := Conf() + + config.SetWithoutSource("cloud_provider_metadata", []string{"aws", "gcp", "azure", "alibaba", "tencent"}) + assert.True(t, IsCloudProviderEnabled("AWS", config)) + assert.True(t, IsCloudProviderEnabled("GCP", config)) + assert.True(t, IsCloudProviderEnabled("Alibaba", config)) + assert.True(t, IsCloudProviderEnabled("Azure", config)) + assert.True(t, IsCloudProviderEnabled("Tencent", config)) + + config.SetWithoutSource("cloud_provider_metadata", []string{"aws"}) + assert.True(t, IsCloudProviderEnabled("AWS", config)) + assert.False(t, IsCloudProviderEnabled("GCP", config)) + assert.False(t, IsCloudProviderEnabled("Alibaba", config)) + assert.False(t, IsCloudProviderEnabled("Azure", config)) + assert.False(t, IsCloudProviderEnabled("Tencent", config)) + + config.SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + assert.False(t, IsCloudProviderEnabled("AWS", config)) + assert.False(t, IsCloudProviderEnabled("GCP", config)) + assert.False(t, IsCloudProviderEnabled("Alibaba", config)) + assert.False(t, IsCloudProviderEnabled("Azure", config)) + assert.True(t, IsCloudProviderEnabled("Tencent", config)) + + config.SetWithoutSource("cloud_provider_metadata", []string{}) + assert.False(t, IsCloudProviderEnabled("AWS", config)) + assert.False(t, IsCloudProviderEnabled("GCP", config)) + assert.False(t, IsCloudProviderEnabled("Alibaba", config)) + assert.False(t, IsCloudProviderEnabled("Azure", config)) + assert.False(t, IsCloudProviderEnabled("Tencent", config)) } func TestEnvNestedConfig(t *testing.T) { - config := SetupConf() + config := Conf() config.BindEnv("foo.bar.nested") t.Setenv("DD_FOO_BAR_NESTED", "baz") @@ -221,12 +221,12 @@ func TestEnvNestedConfig(t *testing.T) { func TestProxy(t *testing.T) { type testCase struct { name string - setup 
func(t *testing.T, config Config) - tests func(t *testing.T, config Config) + setup func(t *testing.T, config pkgconfigmodel.Config) + tests func(t *testing.T, config pkgconfigmodel.Config) proxyForCloudMetadata bool } - expectedProxy := &Proxy{ + expectedProxy := &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", NoProxy: []string{"a", "b", "c"}, @@ -235,7 +235,7 @@ func TestProxy(t *testing.T) { cases := []testCase{ { name: "no values", - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Nil(t, config.Get("proxy")) assert.Nil(t, config.GetProxies()) }, @@ -243,53 +243,53 @@ func TestProxy(t *testing.T) { }, { name: "from configuration", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { config.SetWithoutSource("proxy", expectedProxy) }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, expectedProxy, config.GetProxies()) }, proxyForCloudMetadata: true, }, { name: "from UNIX env only upper case", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("HTTP_PROXY", "http_url") t.Setenv("HTTPS_PROXY", "https_url") t.Setenv("NO_PROXY", "a,b,c") // comma-separated list }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, expectedProxy, config.GetProxies()) }, proxyForCloudMetadata: true, }, { name: "from env only lower case", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("http_proxy", "http_url") t.Setenv("https_proxy", "https_url") t.Setenv("no_proxy", "a,b,c") // comma-separated list }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, expectedProxy, config.GetProxies()) }, proxyForCloudMetadata: true, }, { 
name: "from DD env vars only", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "http_url") t.Setenv("DD_PROXY_HTTPS", "https_url") t.Setenv("DD_PROXY_NO_PROXY", "a b c") // space-separated list }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, expectedProxy, config.GetProxies()) }, proxyForCloudMetadata: true, }, { name: "from DD env vars precedence over UNIX env vars", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "dd_http_url") t.Setenv("DD_PROXY_HTTPS", "dd_https_url") t.Setenv("DD_PROXY_NO_PROXY", "a b c") @@ -297,9 +297,9 @@ func TestProxy(t *testing.T) { t.Setenv("HTTPS_PROXY", "env_https_url") t.Setenv("NO_PROXY", "d,e,f") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "dd_http_url", HTTPS: "dd_https_url", NoProxy: []string{"a", "b", "c"}, @@ -310,14 +310,14 @@ func TestProxy(t *testing.T) { }, { name: "from UNIX env vars and conf", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("HTTP_PROXY", "http_env") config.SetWithoutSource("proxy.no_proxy", []string{"d", "e", "f"}) config.SetWithoutSource("proxy.http", "http_conf") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_env", HTTPS: "", NoProxy: []string{"d", "e", "f"}, @@ -328,14 +328,14 @@ func TestProxy(t *testing.T) { }, { name: "from DD env vars and conf", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "http_env") config.SetWithoutSource("proxy.no_proxy", []string{"d", "e", "f"}) 
config.SetWithoutSource("proxy.http", "http_conf") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_env", HTTPS: "", NoProxy: []string{"d", "e", "f"}, @@ -346,7 +346,7 @@ func TestProxy(t *testing.T) { }, { name: "empty values precedence", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "") t.Setenv("DD_PROXY_NO_PROXY", "a b c") t.Setenv("HTTP_PROXY", "env_http_url") @@ -354,9 +354,9 @@ func TestProxy(t *testing.T) { t.Setenv("NO_PROXY", "") config.SetWithoutSource("proxy.https", "https_conf") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "", HTTPS: "", NoProxy: []string{"a", "b", "c"}, @@ -367,13 +367,13 @@ func TestProxy(t *testing.T) { }, { name: "proxy withou no_proxy", - setup: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "http_url") t.Setenv("DD_PROXY_HTTPS", "https_url") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", }, @@ -384,10 +384,10 @@ func TestProxy(t *testing.T) { }, { name: "empty config with use_proxy_for_cloud_metadata", - setup: func(t *testing.T, config Config) {}, - tests: func(t *testing.T, config Config) { + setup: func(t *testing.T, config pkgconfigmodel.Config) {}, + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "", HTTPS: "", NoProxy: []string{"169.254.169.254", "100.100.100.200"}, @@ -398,14 +398,14 @@ func TestProxy(t *testing.T) { }, { name: "use proxy for cloud metadata", - setup: func(t *testing.T, config Config) { + setup: 
func(t *testing.T, config pkgconfigmodel.Config) { t.Setenv("DD_PROXY_HTTP", "http_url") t.Setenv("DD_PROXY_HTTPS", "https_url") t.Setenv("DD_PROXY_NO_PROXY", "a b c") }, - tests: func(t *testing.T, config Config) { + tests: func(t *testing.T, config pkgconfigmodel.Config) { assert.Equal(t, - &Proxy{ + &pkgconfigmodel.Proxy{ HTTP: "http_url", HTTPS: "https_url", NoProxy: []string{"a", "b", "c", "169.254.169.254", "100.100.100.200"}, @@ -421,7 +421,7 @@ func TestProxy(t *testing.T) { // CircleCI sets NO_PROXY, so unset it for this test unsetEnvForTest(t, "NO_PROXY") - config := SetupConf() + config := Conf() config.SetWithoutSource("use_proxy_for_cloud_metadata", c.proxyForCloudMetadata) // Viper.MergeConfigOverride, which is used when secrets is enabled, will silently fail if a @@ -445,7 +445,7 @@ func TestProxy(t *testing.T) { } func TestSanitizeAPIKeyConfig(t *testing.T) { - config := SetupConf() + config := Conf() config.SetWithoutSource("api_key", "foo") sanitizeAPIKeyConfig(config, "api_key") @@ -465,7 +465,7 @@ func TestSanitizeAPIKeyConfig(t *testing.T) { } func TestNumWorkers(t *testing.T) { - config := SetupConf() + config := Conf() config.SetWithoutSource("python_version", "2") config.SetWithoutSource("tracemalloc_debug", true) @@ -502,20 +502,20 @@ api_key: fakeapikey external_config: external_agent_dd_url: "https://custom.external-agent.datadoghq.eu" ` - AddOverrides(map[string]interface{}{ + pkgconfigmodel.AddOverrides(map[string]interface{}{ "api_key": "overrided", }) - config := SetupConfFromYAML(datadogYaml) - applyOverrideFuncs(config) + config := ConfFromYAML(datadogYaml) + pkgconfigmodel.ApplyOverrideFuncs(config) assert.Equal(config.GetString("api_key"), "overrided", "the api key should have been overrided") assert.Equal(config.GetString("dd_url"), "https://app.datadoghq.eu", "this shouldn't be overrided") - AddOverrides(map[string]interface{}{ + pkgconfigmodel.AddOverrides(map[string]interface{}{ "dd_url": "http://localhost", }) - 
applyOverrideFuncs(config) + pkgconfigmodel.ApplyOverrideFuncs(config) assert.Equal(config.GetString("api_key"), "overrided", "the api key should have been overrided") assert.Equal(config.GetString("dd_url"), "http://localhost", "this dd_url should have been overrided") @@ -546,7 +546,7 @@ dogstatsd_mapper_profiles: tags: foo: "$1" ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) profiles, err := getDogstatsdMappingProfilesConfig(testConfig) @@ -589,7 +589,7 @@ func TestDogstatsdMappingProfilesEmpty(t *testing.T) { datadogYaml := ` dogstatsd_mapper_profiles: ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) profiles, err := getDogstatsdMappingProfilesConfig(testConfig) @@ -604,7 +604,7 @@ func TestDogstatsdMappingProfilesError(t *testing.T) { dogstatsd_mapper_profiles: - abc ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) profiles, err := getDogstatsdMappingProfilesConfig(testConfig) expectedErrorMsg := "Could not parse dogstatsd_mapper_profiles" @@ -624,12 +624,13 @@ func TestDogstatsdMappingProfilesEnv(t *testing.T) { {Match: "some_other_profile.*", Name: "some_other_profile.abc", Tags: map[string]string{"a": "$1"}}, }}, } - mappings, _ := GetDogstatsdMappingProfiles() + cfg := Conf() + mappings, _ := GetDogstatsdMappingProfiles(cfg) assert.Equal(t, mappings, expected) } func TestGetValidHostAliasesWithConfig(t *testing.T) { - config := SetupConfFromYAML(`host_aliases: ["foo", "-bar"]`) + config := ConfFromYAML(`host_aliases: ["foo", "-bar"]`) assert.EqualValues(t, getValidHostAliasesWithConfig(config), []string{"foo"}) } @@ -637,14 +638,14 @@ func TestNetworkDevicesNamespace(t *testing.T) { datadogYaml := ` network_devices: ` - config := SetupConfFromYAML(datadogYaml) + config := ConfFromYAML(datadogYaml) assert.Equal(t, "default", config.GetString("network_devices.namespace")) datadogYaml = ` network_devices: namespace: dev ` - config = 
SetupConfFromYAML(datadogYaml) + config = ConfFromYAML(datadogYaml) assert.Equal(t, "dev", config.GetString("network_devices.namespace")) } @@ -656,7 +657,7 @@ logs_config: docker_path_override: "/custom/path" ` - config := SetupConfFromYAML(datadogYaml) + config := ConfFromYAML(datadogYaml) err := checkConflictingOptions(config) assert.NotNil(t, err) @@ -669,7 +670,7 @@ logs_config: use_podman_logs: true ` - config := SetupConfFromYAML(datadogYaml) + config := ConfFromYAML(datadogYaml) err := checkConflictingOptions(config) assert.Nil(t, err) @@ -726,7 +727,7 @@ proxy: ` expectedURL := "somehost:1234" expectedHTTPURL := "https://" + expectedURL - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) LoadProxyFromEnv(testConfig) err := setupFipsEndpoints(testConfig) require.NoError(t, err) @@ -748,7 +749,7 @@ fips: expectedURL = "localhost:50" expectedHTTPURL = "http://" + expectedURL - testConfig = SetupConfFromYAML(datadogYamlFips) + testConfig = ConfFromYAML(datadogYamlFips) LoadProxyFromEnv(testConfig) err = setupFipsEndpoints(testConfig) require.NoError(t, err) @@ -770,8 +771,8 @@ fips: ` expectedHTTPURL = "https://" + expectedURL - testConfig = SetupConfFromYAML(datadogYamlFips) - testConfig.Set("skip_ssl_validation", false, model.SourceAgentRuntime) // should be overridden by fips.tls_verify + testConfig = ConfFromYAML(datadogYamlFips) + testConfig.Set("skip_ssl_validation", false, pkgconfigmodel.SourceAgentRuntime) // should be overridden by fips.tls_verify LoadProxyFromEnv(testConfig) err = setupFipsEndpoints(testConfig) require.NoError(t, err) @@ -784,8 +785,8 @@ fips: assert.Equal(t, true, testConfig.GetBool("skip_ssl_validation")) assert.Nil(t, testConfig.GetProxies()) - testConfig.Set("skip_ssl_validation", true, model.SourceAgentRuntime) // should be overridden by fips.tls_verify - testConfig.Set("fips.tls_verify", true, model.SourceAgentRuntime) + testConfig.Set("skip_ssl_validation", true, 
pkgconfigmodel.SourceAgentRuntime) // should be overridden by fips.tls_verify + testConfig.Set("fips.tls_verify", true, pkgconfigmodel.SourceAgentRuntime) LoadProxyFromEnv(testConfig) err = setupFipsEndpoints(testConfig) require.NoError(t, err) @@ -794,7 +795,7 @@ fips: assert.Nil(t, testConfig.GetProxies()) } -func assertFipsProxyExpectedConfig(t *testing.T, expectedBaseHTTPURL, expectedBaseURL string, rng bool, c Config) { +func assertFipsProxyExpectedConfig(t *testing.T, expectedBaseHTTPURL, expectedBaseURL string, rng bool, c pkgconfigmodel.Config) { if rng { assert.Equal(t, expectedBaseHTTPURL+"01", c.GetString("dd_url")) assert.Equal(t, expectedBaseHTTPURL+"02", c.GetString("apm_config.apm_dd_url")) @@ -837,7 +838,7 @@ fips: port_range_start: 5000 ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) err := setupFipsEndpoints(testConfig) require.Error(t, err) } @@ -847,7 +848,7 @@ func TestEnablePeerServiceStatsAggregationYAML(t *testing.T) { apm_config: peer_service_aggregation: true ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) err := setupFipsEndpoints(testConfig) require.NoError(t, err) require.True(t, testConfig.GetBool("apm_config.peer_service_aggregation")) @@ -856,7 +857,7 @@ apm_config: apm_config: peer_service_aggregation: false ` - testConfig = SetupConfFromYAML(datadogYaml) + testConfig = ConfFromYAML(datadogYaml) err = setupFipsEndpoints(testConfig) require.NoError(t, err) require.False(t, testConfig.GetBool("apm_config.peer_service_aggregation")) @@ -867,7 +868,7 @@ func TestEnablePeerTagsAggregationYAML(t *testing.T) { apm_config: peer_tags_aggregation: true ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) err := setupFipsEndpoints(testConfig) require.NoError(t, err) require.True(t, testConfig.GetBool("apm_config.peer_tags_aggregation")) @@ -876,7 +877,7 @@ apm_config: apm_config: peer_tags_aggregation: false ` - 
testConfig = SetupConfFromYAML(datadogYaml) + testConfig = ConfFromYAML(datadogYaml) err = setupFipsEndpoints(testConfig) require.NoError(t, err) require.False(t, testConfig.GetBool("apm_config.peer_tags_aggregation")) @@ -884,19 +885,19 @@ apm_config: func TestEnablePeerServiceStatsAggregationEnv(t *testing.T) { t.Setenv("DD_APM_PEER_SERVICE_AGGREGATION", "true") - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.True(t, testConfig.GetBool("apm_config.peer_service_aggregation")) t.Setenv("DD_APM_PEER_SERVICE_AGGREGATION", "false") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.False(t, testConfig.GetBool("apm_config.peer_service_aggregation")) } func TestEnablePeerTagsAggregationEnv(t *testing.T) { t.Setenv("DD_APM_PEER_TAGS_AGGREGATION", "true") - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.True(t, testConfig.GetBool("apm_config.peer_tags_aggregation")) t.Setenv("DD_APM_PEER_TAGS_AGGREGATION", "false") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.False(t, testConfig.GetBool("apm_config.peer_tags_aggregation")) } @@ -905,7 +906,7 @@ func TestEnableStatsComputationBySpanKindYAML(t *testing.T) { apm_config: compute_stats_by_span_kind: false ` - testConfig := SetupConfFromYAML(datadogYaml) + testConfig := ConfFromYAML(datadogYaml) err := setupFipsEndpoints(testConfig) require.NoError(t, err) require.False(t, testConfig.GetBool("apm_config.compute_stats_by_span_kind")) @@ -914,7 +915,7 @@ apm_config: apm_config: compute_stats_by_span_kind: true ` - testConfig = SetupConfFromYAML(datadogYaml) + testConfig = ConfFromYAML(datadogYaml) err = setupFipsEndpoints(testConfig) require.NoError(t, err) require.True(t, testConfig.GetBool("apm_config.compute_stats_by_span_kind")) @@ -922,42 +923,42 @@ apm_config: func TestComputeStatsBySpanKindEnv(t *testing.T) { t.Setenv("DD_APM_COMPUTE_STATS_BY_SPAN_KIND", "false") - testConfig := SetupConfFromYAML("") + 
testConfig := ConfFromYAML("") require.False(t, testConfig.GetBool("apm_config.compute_stats_by_span_kind")) t.Setenv("DD_APM_COMPUTE_STATS_BY_SPAN_KIND", "true") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.True(t, testConfig.GetBool("apm_config.compute_stats_by_span_kind")) } func TestIsRemoteConfigEnabled(t *testing.T) { t.Setenv("DD_REMOTE_CONFIGURATION_ENABLED", "true") - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.True(t, IsRemoteConfigEnabled(testConfig)) t.Setenv("DD_FIPS_ENABLED", "true") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.False(t, IsRemoteConfigEnabled(testConfig)) t.Setenv("DD_FIPS_ENABLED", "false") t.Setenv("DD_SITE", "ddog-gov.com") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.False(t, IsRemoteConfigEnabled(testConfig)) } func TestGetRemoteConfigurationAllowedIntegrations(t *testing.T) { // EMPTY configuration - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.Equal(t, map[string]bool{}, GetRemoteConfigurationAllowedIntegrations(testConfig)) t.Setenv("DD_REMOTE_CONFIGURATION_AGENT_INTEGRATIONS_ALLOW_LIST", "[\"POSTgres\", \"redisDB\"]") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.Equal(t, map[string]bool{"postgres": true, "redisdb": true}, GetRemoteConfigurationAllowedIntegrations(testConfig), ) t.Setenv("DD_REMOTE_CONFIGURATION_AGENT_INTEGRATIONS_BLOCK_LIST", "[\"mySQL\", \"redisDB\"]") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.Equal(t, map[string]bool{"postgres": true, "redisdb": false, "mysql": false}, GetRemoteConfigurationAllowedIntegrations(testConfig), @@ -965,39 +966,38 @@ func TestGetRemoteConfigurationAllowedIntegrations(t *testing.T) { } func TestLanguageDetectionSettings(t *testing.T) { - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.False(t, 
testConfig.GetBool("language_detection.enabled")) t.Setenv("DD_LANGUAGE_DETECTION_ENABLED", "true") - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.True(t, testConfig.GetBool("language_detection.enabled")) } func TestPeerTagsYAML(t *testing.T) { - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.Nil(t, testConfig.GetStringSlice("apm_config.peer_tags")) datadogYaml := ` apm_config: peer_tags: ["aws.s3.bucket", "db.instance", "db.system"] ` - testConfig = SetupConfFromYAML(datadogYaml) + testConfig = ConfFromYAML(datadogYaml) require.Equal(t, []string{"aws.s3.bucket", "db.instance", "db.system"}, testConfig.GetStringSlice("apm_config.peer_tags")) } func TestPeerTagsEnv(t *testing.T) { - testConfig := SetupConfFromYAML("") + testConfig := ConfFromYAML("") require.Nil(t, testConfig.GetStringSlice("apm_config.peer_tags")) t.Setenv("DD_APM_PEER_TAGS", `["aws.s3.bucket","db.instance","db.system"]`) - testConfig = SetupConfFromYAML("") + testConfig = ConfFromYAML("") require.Equal(t, []string{"aws.s3.bucket", "db.instance", "db.system"}, testConfig.GetStringSlice("apm_config.peer_tags")) } func TestLogDefaults(t *testing.T) { - // New config - c := NewConfig("test", "DD", strings.NewReplacer(".", "_")) + c := pkgconfigmodel.NewConfig("test", "DD", strings.NewReplacer(".", "_")) require.Equal(t, 0, c.GetInt("log_file_max_rolls")) require.Equal(t, "", c.GetString("log_file_max_size")) require.Equal(t, "", c.GetString("log_file")) @@ -1006,7 +1006,7 @@ func TestLogDefaults(t *testing.T) { require.False(t, c.GetBool("log_format_json")) // Test Config (same as Datadog) - testConfig := SetupConf() + testConfig := Conf() require.Equal(t, 1, testConfig.GetInt("log_file_max_rolls")) require.Equal(t, "10Mb", testConfig.GetString("log_file_max_size")) require.Equal(t, "", testConfig.GetString("log_file")) @@ -1015,17 +1015,20 @@ func TestLogDefaults(t *testing.T) { require.False(t, testConfig.GetBool("log_format_json")) // 
SystemProbe config + + SystemProbe := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + InitSystemProbeConfig(SystemProbe) + require.Equal(t, 1, SystemProbe.GetInt("log_file_max_rolls")) require.Equal(t, "10Mb", SystemProbe.GetString("log_file_max_size")) require.Equal(t, defaultSystemProbeLogFilePath, SystemProbe.GetString("log_file")) require.Equal(t, "info", SystemProbe.GetString("log_level")) require.True(t, SystemProbe.GetBool("log_to_console")) require.False(t, SystemProbe.GetBool("log_format_json")) - } func TestProxyNotLoaded(t *testing.T) { - conf := SetupConf() + conf := Conf() os.Setenv("AWS_LAMBDA_FUNCTION_NAME", "TestFunction") proxyHTTP := "http://localhost:1234" @@ -1040,7 +1043,7 @@ func TestProxyNotLoaded(t *testing.T) { } func TestProxyLoadedFromEnvVars(t *testing.T) { - conf := SetupConf() + conf := Conf() os.Setenv("AWS_LAMBDA_FUNCTION_NAME", "TestFunction") proxyHTTP := "http://localhost:1234" @@ -1048,20 +1051,17 @@ func TestProxyLoadedFromEnvVars(t *testing.T) { t.Setenv("DD_PROXY_HTTP", proxyHTTP) t.Setenv("DD_PROXY_HTTPS", proxyHTTPS) - Datadog = conf - LoadWithoutSecret() + LoadWithoutSecret(conf, []string{}) proxyHTTPConfig := conf.GetString("proxy.http") proxyHTTPSConfig := conf.GetString("proxy.https") assert.Equal(t, proxyHTTP, proxyHTTPConfig) assert.Equal(t, proxyHTTPS, proxyHTTPSConfig) - - Datadog = SetupConf() } func TestProxyLoadedFromConfigFile(t *testing.T) { - conf := SetupConf() + conf := Conf() os.Setenv("AWS_LAMBDA_FUNCTION_NAME", "TestFunction") tempDir := t.TempDir() @@ -1069,20 +1069,17 @@ func TestProxyLoadedFromConfigFile(t *testing.T) { os.WriteFile(configTest, []byte("proxy:\n http: \"http://localhost:1234\"\n https: \"https://localhost:1234\""), 0644) conf.AddConfigPath(tempDir) - Datadog = conf - LoadWithoutSecret() + LoadWithoutSecret(conf, []string{}) proxyHTTPConfig := conf.GetString("proxy.http") proxyHTTPSConfig := conf.GetString("proxy.https") assert.Equal(t, "http://localhost:1234", 
proxyHTTPConfig) assert.Equal(t, "https://localhost:1234", proxyHTTPSConfig) - - Datadog = SetupConf() } func TestProxyLoadedFromConfigFileAndEnvVars(t *testing.T) { - conf := SetupConf() + conf := Conf() os.Setenv("AWS_LAMBDA_FUNCTION_NAME", "TestFunction") proxyHTTPEnvVar := "http://localhost:1234" @@ -1095,14 +1092,11 @@ func TestProxyLoadedFromConfigFileAndEnvVars(t *testing.T) { os.WriteFile(configTest, []byte("proxy:\n http: \"http://localhost:5678\"\n https: \"http://localhost:5678\""), 0644) conf.AddConfigPath(tempDir) - Datadog = conf - LoadWithoutSecret() + LoadWithoutSecret(conf, []string{}) proxyHTTPConfig := conf.GetString("proxy.http") proxyHTTPSConfig := conf.GetString("proxy.https") assert.Equal(t, proxyHTTPEnvVar, proxyHTTPConfig) assert.Equal(t, proxyHTTPSEnvVar, proxyHTTPSConfig) - - Datadog = SetupConf() } diff --git a/pkg/config/config_windows.go b/pkg/config/setup/config_windows.go similarity index 99% rename from pkg/config/config_windows.go rename to pkg/config/setup/config_windows.go index 5ae33a7014ced..8f3e199ec3387 100644 --- a/pkg/config/config_windows.go +++ b/pkg/config/setup/config_windows.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package setup import ( "os" diff --git a/pkg/config/setup/go.mod b/pkg/config/setup/go.mod new file mode 100644 index 0000000000000..7984e78cf1752 --- /dev/null +++ b/pkg/config/setup/go.mod @@ -0,0 +1,98 @@ +module github.com/DataDog/datadog-agent/pkg/config/setup + +go 1.20 + +replace ( + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../env + github.com/DataDog/datadog-agent/pkg/config/model => ../model/ + github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry + github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil + + // Internal deps fix version + github.com/spf13/cast => github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/env v0.0.0-00010101000000-000000000000 + 
github.com/DataDog/datadog-agent/pkg/config/model v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/util/executable v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/log v0.50.0-rc.4 + github.com/DataDog/datadog-agent/pkg/util/optional v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.0.0-00010101000000-000000000000 + github.com/stretchr/testify v1.8.4 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/flare/types v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/telemetry v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.50.0-rc.4 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.50.0-rc.4 // indirect + github.com/DataDog/viper v1.12.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/magiconair/properties 
v1.8.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + github.com/shirou/gopsutil/v3 v3.23.9 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.opentelemetry.io/otel v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect + go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/sdk v1.20.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.20.0 // indirect + go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect + go.uber.org/fx v1.18.2 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.23.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.6.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/config/setup/go.sum b/pkg/config/setup/go.sum new file mode 100644 index 0000000000000..77f3f028b1749 --- /dev/null +++ b/pkg/config/setup/go.sum @@ -0,0 +1,346 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3 h1:SobA9WYm4K/MUtWlbKaomWTmnuYp1KhIm8Wlx3vmpsg= +github.com/DataDog/cast v1.3.1-0.20190301154711-1ee8c8bd14a3/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/DataDog/viper v1.12.0 h1:FufyZpZPxyszafSV5B8Q8it75IhhuJwH0T7QpT6HnD0= +github.com/DataDog/viper v1.12.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod 
h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod 
h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= +go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= +go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk= +go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/config/ipc_address.go b/pkg/config/setup/ipc_address.go similarity index 93% rename from pkg/config/ipc_address.go rename to pkg/config/setup/ipc_address.go index 81c2c1f4baa85..ef17507b454f6 100644 --- a/pkg/config/ipc_address.go +++ b/pkg/config/setup/ipc_address.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package setup import ( "fmt" @@ -38,8 +38,8 @@ func IsLocalAddress(address string) (string, error) { } // GetIPCAddress returns the IPC address or an error if the address is not local -func GetIPCAddress() (string, error) { - return getIPCAddress(Datadog) +func GetIPCAddress(config pkgconfigmodel.Reader) (string, error) { + return getIPCAddress(config) } // GetIPCPort returns the IPC port diff --git a/pkg/config/ipc_address_test.go b/pkg/config/setup/ipc_address_test.go similarity index 99% rename from pkg/config/ipc_address_test.go rename to pkg/config/setup/ipc_address_test.go index 809ddf90a4766..3774f6ca695c5 100644 --- a/pkg/config/ipc_address_test.go +++ b/pkg/config/setup/ipc_address_test.go @@ -3,15 +3,14 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "strings" "testing" - "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/stretchr/testify/require" ) const ( diff --git a/pkg/config/otlp.go b/pkg/config/setup/otlp.go similarity index 95% rename from pkg/config/otlp.go rename to pkg/config/setup/otlp.go index 3a7589dcc1e5c..c42f7eb671bc7 100644 --- a/pkg/config/otlp.go +++ b/pkg/config/setup/otlp.go @@ -3,7 +3,11 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2021-present Datadog, Inc. -package config +package setup + +import ( + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" +) // OTLP configuration paths. const ( @@ -23,8 +27,8 @@ const ( OTLPDebug = OTLPSection + "." + OTLPDebugKey ) -// SetupOTLP related configuration. -func SetupOTLP(config Config) { +// OTLP related configuration. 
+func OTLP(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault(OTLPTracePort, 5003) config.BindEnvAndSetDefault(OTLPMetricsEnabled, true) config.BindEnvAndSetDefault(OTLPTracesEnabled, true) @@ -55,7 +59,7 @@ func SetupOTLP(config Config) { // since we get the configuration through GetStringMap // // We are missing TLS settings: since some of them need more work to work right they are not included here. -func setupOTLPEnvironmentVariables(config Config) { +func setupOTLPEnvironmentVariables(config pkgconfigmodel.Config) { // gRPC settings config.BindEnv(OTLPSection + ".receiver.protocols.grpc.endpoint") config.BindEnv(OTLPSection + ".receiver.protocols.grpc.transport") diff --git a/pkg/config/process.go b/pkg/config/setup/process.go similarity index 94% rename from pkg/config/process.go rename to pkg/config/setup/process.go index 3d1357d271dd7..a9329dca9db22 100644 --- a/pkg/config/process.go +++ b/pkg/config/setup/process.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "net" @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -88,7 +88,7 @@ var processesAddOverrideOnce sync.Once // procBindEnvAndSetDefault is a helper function that generates both "DD_PROCESS_CONFIG_" and "DD_PROCESS_AGENT_" prefixes from a key. // We need this helper function because the standard BindEnvAndSetDefault can only generate one prefix from a key. -func procBindEnvAndSetDefault(config Config, key string, val interface{}) { +func procBindEnvAndSetDefault(config pkgconfigmodel.Config, key string, val interface{}) { // Uppercase, replace "." with "_" and add "DD_" prefix to key so that we follow the same environment // variable convention as the core agent. 
processConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1) @@ -100,14 +100,14 @@ func procBindEnvAndSetDefault(config Config, key string, val interface{}) { // procBindEnv is a helper function that generates both "DD_PROCESS_CONFIG_" and "DD_PROCESS_AGENT_" prefixes from a key, but does not set a default. // We need this helper function because the standard BindEnv can only generate one prefix from a key. -func procBindEnv(config Config, key string) { +func procBindEnv(config pkgconfigmodel.Config, key string) { processConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1) processAgentKey := strings.Replace(processConfigKey, "PROCESS_CONFIG", "PROCESS_AGENT", 1) config.BindEnv(key, processConfigKey, processAgentKey) } -func setupProcesses(config Config) { +func setupProcesses(config pkgconfigmodel.Config) { // "process_config.enabled" is deprecated. We must still be able to detect if it is present, to know if we should use it // or container_collection.enabled and process_collection.enabled. procBindEnv(config, "process_config.enabled") @@ -206,38 +206,38 @@ func setupProcesses(config Config) { procBindEnvAndSetDefault(config, "process_config.language_detection.grpc_port", DefaultProcessEntityStreamPort) processesAddOverrideOnce.Do(func() { - AddOverrideFunc(loadProcessTransforms) + pkgconfigmodel.AddOverrideFunc(loadProcessTransforms) }) } // loadProcessTransforms loads transforms associated with process config settings. 
-func loadProcessTransforms(config Config) { +func loadProcessTransforms(config pkgconfigmodel.Config) { if config.IsSet("process_config.enabled") { log.Info("process_config.enabled is deprecated, use process_config.container_collection.enabled " + "and process_config.process_collection.enabled instead, " + "see https://docs.datadoghq.com/infrastructure/process#installation for more information") procConfigEnabled := strings.ToLower(config.GetString("process_config.enabled")) if procConfigEnabled == "disabled" { - config.Set("process_config.process_collection.enabled", false, model.SourceAgentRuntime) - config.Set("process_config.container_collection.enabled", false, model.SourceAgentRuntime) + config.Set("process_config.process_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime) + config.Set("process_config.container_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime) } else if enabled, _ := strconv.ParseBool(procConfigEnabled); enabled { // "true" - config.Set("process_config.process_collection.enabled", true, model.SourceAgentRuntime) - config.Set("process_config.container_collection.enabled", false, model.SourceAgentRuntime) + config.Set("process_config.process_collection.enabled", true, pkgconfigmodel.SourceAgentRuntime) + config.Set("process_config.container_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime) } else { // "false" - config.Set("process_config.process_collection.enabled", false, model.SourceAgentRuntime) - config.Set("process_config.container_collection.enabled", true, model.SourceAgentRuntime) + config.Set("process_config.process_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime) + config.Set("process_config.container_collection.enabled", true, pkgconfigmodel.SourceAgentRuntime) } } } // GetProcessAPIAddressPort returns the API endpoint of the process agent -func GetProcessAPIAddressPort() (string, error) { - address, err := GetIPCAddress() +func GetProcessAPIAddressPort(config pkgconfigmodel.Reader) 
(string, error) { + address, err := GetIPCAddress(config) if err != nil { return "", err } - port := Datadog.GetInt("process_config.cmd_port") + port := config.GetInt("process_config.cmd_port") if port <= 0 { log.Warnf("Invalid process_config.cmd_port -- %d, using default port %d", port, DefaultProcessCmdPort) port = DefaultProcessCmdPort diff --git a/pkg/config/process_test.go b/pkg/config/setup/process_test.go similarity index 97% rename from pkg/config/process_test.go rename to pkg/config/setup/process_test.go index cdb3fac144ade..ceef05e505a29 100644 --- a/pkg/config/process_test.go +++ b/pkg/config/setup/process_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "fmt" @@ -11,12 +11,13 @@ import ( "testing" "time" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/stretchr/testify/assert" ) // TestProcessDefaults tests to ensure that the config has set process settings correctly func TestProcessDefaultConfig(t *testing.T) { - cfg := SetupConf() + cfg := Conf() for _, tc := range []struct { key string @@ -139,7 +140,7 @@ func TestProcessDefaultConfig(t *testing.T) { // TestPrefixes tests that for every corresponding `DD_PROCESS_CONFIG` prefix, there is a `DD_PROCESS_AGENT` prefix as well. func TestProcessAgentPrefixes(t *testing.T) { - envVarSlice := SetupConf().GetEnvVars() + envVarSlice := Conf().GetEnvVars() envVars := make(map[string]struct{}, len(envVarSlice)) for _, envVar := range envVarSlice { envVars[envVar] = struct{}{} @@ -160,7 +161,7 @@ func TestProcessAgentPrefixes(t *testing.T) { // TestPrefixes tests that for every corresponding `DD_PROCESS_AGENT` prefix, there is a `DD_PROCESS_CONFIG` prefix as well. 
func TestProcessConfigPrefixes(t *testing.T) { - envVarSlice := SetupConf().GetEnvVars() + envVarSlice := Conf().GetEnvVars() envVars := make(map[string]struct{}, len(envVarSlice)) for _, envVar := range envVarSlice { envVars[envVar] = struct{}{} @@ -182,7 +183,7 @@ func TestProcessConfigPrefixes(t *testing.T) { } func TestEnvVarOverride(t *testing.T) { - cfg := SetupConf() + cfg := Conf() for _, tc := range []struct { key, env, value string @@ -467,7 +468,7 @@ func TestEnvVarOverride(t *testing.T) { }) } -func readCfgWithType(cfg Config, key, expType string) interface{} { +func readCfgWithType(cfg pkgconfigmodel.Config, key, expType string) interface{} { switch expType { case "stringSlice": return cfg.GetStringSlice(key) @@ -479,7 +480,7 @@ func readCfgWithType(cfg Config, key, expType string) interface{} { } func TestEnvVarCustomSensitiveWords(t *testing.T) { - cfg := SetupConf() + cfg := Conf() expectedPrefixes := []string{"DD_", "DD_PROCESS_CONFIG_", "DD_PROCESS_AGENT_"} for i, tc := range []struct { @@ -511,7 +512,7 @@ func TestEnvVarCustomSensitiveWords(t *testing.T) { } func TestProcBindEnvAndSetDefault(t *testing.T) { - cfg := SetupConf() + cfg := Conf() procBindEnvAndSetDefault(cfg, "process_config.foo.bar", "asdf") envs := map[string]struct{}{} @@ -530,7 +531,7 @@ func TestProcBindEnvAndSetDefault(t *testing.T) { } func TestProcBindEnv(t *testing.T) { - cfg := SetupConf() + cfg := Conf() procBindEnv(cfg, "process_config.foo.bar") envs := map[string]struct{}{} @@ -575,7 +576,7 @@ func TestProcConfigEnabledTransform(t *testing.T) { }, } { t.Run("process_config.enabled="+tc.procConfigEnabled, func(t *testing.T) { - cfg := SetupConf() + cfg := Conf() cfg.SetWithoutSource("process_config.enabled", tc.procConfigEnabled) loadProcessTransforms(cfg) @@ -583,5 +584,4 @@ func TestProcConfigEnabledTransform(t *testing.T) { assert.Equal(t, tc.expectedProcessCollection, cfg.GetBool("process_config.process_collection.enabled")) }) } - } diff --git 
a/pkg/config/standard_names.go b/pkg/config/setup/standard_names.go similarity index 98% rename from pkg/config/standard_names.go rename to pkg/config/setup/standard_names.go index 609e73dc5d9f6..62176ffc60d48 100644 --- a/pkg/config/standard_names.go +++ b/pkg/config/setup/standard_names.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup // StandardJMXIntegrations is the list of standard jmx integrations. // This list is used by the Agent to determine if an integration is JMXFetch-based, diff --git a/pkg/config/system_probe.go b/pkg/config/setup/system_probe.go similarity index 98% rename from pkg/config/system_probe.go rename to pkg/config/setup/system_probe.go index bd5ee872bbc6b..03d879ccd6d66 100644 --- a/pkg/config/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "encoding/json" @@ -13,6 +13,7 @@ import ( "strings" "time" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -57,8 +58,7 @@ const ( ) // InitSystemProbeConfig declares all the configuration values normally read from system-probe.yaml. 
-func InitSystemProbeConfig(cfg Config) { - +func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("ignore_host_etc", false) cfg.BindEnvAndSetDefault("go_core_dump", false) @@ -226,7 +226,7 @@ func InitSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault(join(smjtNS, "args"), defaultServiceMonitoringJavaAgentArgs) cfg.BindEnvAndSetDefault(join(smjtNS, "allow_regex"), "") cfg.BindEnvAndSetDefault(join(smjtNS, "block_regex"), "") - cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), false) + cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), true) cfg.BindEnvAndSetDefault(join(netNS, "enable_gateway_lookup"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_GATEWAY_LOOKUP") // Default value (100000) is set in `adjustUSM`, to avoid having "deprecation warning", due to the default value. @@ -354,7 +354,7 @@ func suffixHostEtc(suffix string) string { // eventMonitorBindEnvAndSetDefault is a helper function that generates both "DD_RUNTIME_SECURITY_CONFIG_" and "DD_EVENT_MONITORING_CONFIG_" // prefixes from a key. We need this helper function because the standard BindEnvAndSetDefault can only generate one prefix, but we want to // support both for backwards compatibility. -func eventMonitorBindEnvAndSetDefault(config Config, key string, val interface{}) { +func eventMonitorBindEnvAndSetDefault(config pkgconfigmodel.Config, key string, val interface{}) { // Uppercase, replace "." with "_" and add "DD_" prefix to key so that we follow the same environment // variable convention as the core agent. emConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1) @@ -365,7 +365,7 @@ func eventMonitorBindEnvAndSetDefault(config Config, key string, val interface{} } // eventMonitorBindEnv is the same as eventMonitorBindEnvAndSetDefault, but without setting a default. 
-func eventMonitorBindEnv(config Config, key string) { +func eventMonitorBindEnv(config pkgconfigmodel.Config, key string) { emConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1) runtimeSecKey := strings.Replace(emConfigKey, "EVENT_MONITORING_CONFIG", "RUNTIME_SECURITY_CONFIG", 1) diff --git a/pkg/config/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go similarity index 96% rename from pkg/config/system_probe_cws.go rename to pkg/config/setup/system_probe_cws.go index c98e08302a9b5..c9cceaed24788 100644 --- a/pkg/config/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -3,9 +3,13 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup -func initCWSSystemProbeConfig(cfg Config) { +import ( + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" +) + +func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { // CWS - general config // the following entries are platform specific // - runtime_security_config.policies.dir @@ -18,6 +22,7 @@ func initCWSSystemProbeConfig(cfg Config) { cfg.BindEnvAndSetDefault("runtime_security_config.policies.watch_dir", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.per_rule_enabled", false) + cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.report_internal_policies", false) cfg.BindEnvAndSetDefault("runtime_security_config.event_server.burst", 40) cfg.BindEnvAndSetDefault("runtime_security_config.event_server.retention", "6s") cfg.BindEnvAndSetDefault("runtime_security_config.event_server.rate", 10) diff --git a/pkg/config/system_probe_cws_notwin.go b/pkg/config/setup/system_probe_cws_notwin.go similarity index 76% rename from pkg/config/system_probe_cws_notwin.go rename to pkg/config/setup/system_probe_cws_notwin.go index 
bde0548a05b12..05032e7d2276a 100644 --- a/pkg/config/system_probe_cws_notwin.go +++ b/pkg/config/setup/system_probe_cws_notwin.go @@ -5,9 +5,13 @@ //go:build !windows -package config +package setup -func platformCWSConfig(cfg Config) { +import ( + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" +) + +func platformCWSConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("runtime_security_config.policies.dir", DefaultRuntimePoliciesDir) cfg.BindEnvAndSetDefault("runtime_security_config.socket", "/opt/datadog-agent/run/runtime-security.sock") } diff --git a/pkg/config/system_probe_cws_windows.go b/pkg/config/setup/system_probe_cws_windows.go similarity index 84% rename from pkg/config/system_probe_cws_windows.go rename to pkg/config/setup/system_probe_cws_windows.go index 527e4c82edfb9..9a84b449d4df3 100644 --- a/pkg/config/system_probe_cws_windows.go +++ b/pkg/config/setup/system_probe_cws_windows.go @@ -5,15 +5,16 @@ //go:build windows -package config +package setup import ( "path/filepath" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/winutil" ) -func platformCWSConfig(cfg Config) { +func platformCWSConfig(cfg pkgconfigmodel.Config) { programdata, err := winutil.GetProgramDataDir() if err == nil { cfg.BindEnvAndSetDefault("runtime_security_config.policies.dir", filepath.Join(programdata, "runtime-security.d")) diff --git a/pkg/config/setup/test_helpers.go b/pkg/config/setup/test_helpers.go new file mode 100644 index 0000000000000..9ed3db639bee4 --- /dev/null +++ b/pkg/config/setup/test_helpers.go @@ -0,0 +1,34 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build test + +package setup + +import ( + "bytes" + "log" + "strings" + + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" +) + +// Conf generates and returns a new configuration +func Conf() pkgconfigmodel.Config { + conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + InitConfig(conf) + return conf +} + +// ConfFromYAML generates a configuration from the given yaml config +func ConfFromYAML(yamlConfig string) pkgconfigmodel.Config { + conf := Conf() + conf.SetConfigType("yaml") + e := conf.ReadConfig(bytes.NewBuffer([]byte(yamlConfig))) + if e != nil { + log.Println(e) + } + return conf +} diff --git a/pkg/config/unexpectedunicodefinder.go b/pkg/config/setup/unexpectedunicodefinder.go similarity index 99% rename from pkg/config/unexpectedunicodefinder.go rename to pkg/config/setup/unexpectedunicodefinder.go index 63452148dc411..63767f60edf59 100644 --- a/pkg/config/unexpectedunicodefinder.go +++ b/pkg/config/setup/unexpectedunicodefinder.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2022-present Datadog, Inc. -package config +package setup import ( "unicode" diff --git a/pkg/config/unexpectedunicodefinder_test.go b/pkg/config/setup/unexpectedunicodefinder_test.go similarity index 99% rename from pkg/config/unexpectedunicodefinder_test.go rename to pkg/config/setup/unexpectedunicodefinder_test.go index f217501013776..276a270b24e00 100644 --- a/pkg/config/unexpectedunicodefinder_test.go +++ b/pkg/config/setup/unexpectedunicodefinder_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2022-present Datadog, Inc. 
-package config +package setup import "testing" diff --git a/pkg/config/test_helpers.go b/pkg/config/test_helpers.go index e5ff9f48225fc..c57424f207aef 100644 --- a/pkg/config/test_helpers.go +++ b/pkg/config/test_helpers.go @@ -8,12 +8,11 @@ package config import ( - "bytes" - "log" "strings" "testing" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( @@ -21,25 +20,13 @@ var ( SetFeatures = env.SetFeatures // SetFeaturesNoCleanup is alias from env SetFeaturesNoCleanup = env.SetFeaturesNoCleanup -) -// SetupConf generates and returns a new configuration -func SetupConf() Config { - conf := NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) - InitConfig(conf) - return conf -} + // SetupConf generates and returns a new configuration + SetupConf = pkgconfigsetup.Conf -// SetupConfFromYAML generates a configuration from the given yaml config -func SetupConfFromYAML(yamlConfig string) Config { - conf := SetupConf() - conf.SetConfigType("yaml") - e := conf.ReadConfig(bytes.NewBuffer([]byte(yamlConfig))) - if e != nil { - log.Println(e) - } - return conf -} + // SetupConfFromYAML generates a configuration from the given yaml config + SetupConfFromYAML = pkgconfigsetup.ConfFromYAML +) // ResetSystemProbeConfig resets the configuration. func ResetSystemProbeConfig(t *testing.T) { @@ -48,5 +35,5 @@ func ResetSystemProbeConfig(t *testing.T) { SystemProbe = originalConfig }) SystemProbe = NewConfig("system-probe", "DD", strings.NewReplacer(".", "_")) - InitSystemProbeConfig(SystemProbe) + pkgconfigsetup.InitSystemProbeConfig(SystemProbe) } diff --git a/pkg/ebpf/ebpftest/testlogwriter.go b/pkg/ebpf/ebpftest/testlogwriter.go new file mode 100644 index 0000000000000..b11cd924cc2ec --- /dev/null +++ b/pkg/ebpf/ebpftest/testlogwriter.go @@ -0,0 +1,26 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ebpftest + +import ( + "testing" +) + +// TestLogWriter wraps the testing.T object and provides a simple +// io.Writer interface, to be used with DumpMaps functions +// Very simple implementation now for output in debug functions so +// newlines aren't handled: each call to Write is just sent to +// t.Log +type TestLogWriter struct { + T *testing.T +} + +// Write method implementation, sends the data to t.Log() +func (tlw *TestLogWriter) Write(p []byte) (int, error) { + tlw.T.Log(string(p)) + + return len(p), nil +} diff --git a/pkg/ebpf/feature_test.go b/pkg/ebpf/feature_test.go new file mode 100644 index 0000000000000..4fcd368baa9c3 --- /dev/null +++ b/pkg/ebpf/feature_test.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package ebpf + +import ( + "errors" + "testing" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/rlimit" + "github.com/stretchr/testify/require" +) + +func TestKprobeHelperProbe(t *testing.T) { + err := rlimit.RemoveMemlock() + require.NoError(t, err) + + var requiredFuncs = []asm.BuiltinFunc{ + asm.FnMapLookupElem, + asm.FnMapUpdateElem, + asm.FnMapDeleteElem, + asm.FnPerfEventOutput, + asm.FnPerfEventRead, + } + for _, rf := range requiredFuncs { + if err := features.HaveProgramHelper(ebpf.Kprobe, rf); err != nil { + if errors.Is(err, ebpf.ErrNotSupported) { + t.Errorf("%s unsupported", rf.String()) + } else { + t.Errorf("error checking for ebpf helper %s support: %s", rf.String(), err) + } + } + } +} diff --git a/pkg/logs/internal/status/status.go b/pkg/logs/internal/status/status.go index d6588fec37674..817dc42a5cec2 100644 --- a/pkg/logs/internal/status/status.go +++ b/pkg/logs/internal/status/status.go @@ -95,3 +95,20 @@ func (s *LogStatus) Dump() string { } return fmt.Sprintf("&LogStatus{status: %s, err: %#v}", status, s.err) } + +// String returns a human readable representation of the status. 
+func (s *LogStatus) String() string { + s.mu.Lock() + defer s.mu.Unlock() + + switch s.status { + case isPending: + return "pending" + case isSuccess: + return "success" + case isError: + return "error" + default: + return fmt.Sprintf("unknown status: %d", s.status) + } +} diff --git a/pkg/logs/internal/status/status_test.go b/pkg/logs/internal/status/status_test.go index c8412711583b2..8fe6e3d377c6b 100644 --- a/pkg/logs/internal/status/status_test.go +++ b/pkg/logs/internal/status/status_test.go @@ -43,6 +43,19 @@ func (s *LogStatusSuite) TestError() { s.Equal("Error: bar", s.status.GetError()) } +func (s *LogStatusSuite) TestString() { + s.status = NewLogStatus() + s.Equal("pending", s.status.String()) + + s.status.Error(errors.New("bar")) + + s.Equal("error", s.status.String()) + s.Equal("Error: bar", s.status.GetError()) + + s.status.Success() + s.Equal("success", s.status.String()) +} + func TestLogStatusSuite(t *testing.T) { suite.Run(t, new(LogStatusSuite)) } diff --git a/pkg/logs/schedulers/schedulers.go b/pkg/logs/schedulers/schedulers.go index 8a5eeb0db2d67..72f184eb6ecd9 100644 --- a/pkg/logs/schedulers/schedulers.go +++ b/pkg/logs/schedulers/schedulers.go @@ -40,6 +40,11 @@ func (ss *Schedulers) AddScheduler(scheduler Scheduler) { } } +// GetSources returns all the log sources from the source manager. +func (ss *Schedulers) GetSources() []*sources.LogSource { + return ss.mgr.GetSources() +} + // Start starts all schedulers in the collection.
func (ss *Schedulers) Start() { for _, s := range ss.schedulers { diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index 8f56a739783d1..e6f1739c80069 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -137,7 +137,8 @@ typedef enum { PROG_HTTP, PROG_HTTP2_HANDLE_FIRST_FRAME, PROG_HTTP2_FRAME_FILTER, - PROG_HTTP2_FRAME_PARSER, + PROG_HTTP2_HEADERS_PARSER, + PROG_HTTP2_EOS_PARSER, PROG_KAFKA, PROG_GRPC, // Add before this value. diff --git a/pkg/network/ebpf/c/protocols/grpc/helpers.h b/pkg/network/ebpf/c/protocols/grpc/helpers.h index 46bf6f59a9bfb..18a644a494b9e 100644 --- a/pkg/network/ebpf/c/protocols/grpc/helpers.h +++ b/pkg/network/ebpf/c/protocols/grpc/helpers.h @@ -43,7 +43,7 @@ static __always_inline grpc_status_t is_content_type_grpc(const struct __sk_buff return PAYLOAD_UNDETERMINED; } - string_literal_header len; + string_literal_header_t len; if (skb_info->data_off + sizeof(len) > frame_end) { return PAYLOAD_NOT_GRPC; } @@ -68,7 +68,7 @@ static __always_inline grpc_status_t is_content_type_grpc(const struct __sk_buff // skip_header increments skb_info->data_off so that it skips the remainder of // the current header (of which we already parsed the index value). static __always_inline void skip_literal_header(const struct __sk_buff *skb, skb_info_t *skb_info, __u32 frame_end, __u8 idx) { - string_literal_header len; + string_literal_header_t len; if (skb_info->data_off + sizeof(len) > frame_end) { return; } @@ -89,7 +89,7 @@ static __always_inline void skip_literal_header(const struct __sk_buff *skb, skb // Scan headers goes through the headers in a frame, and tries to find a // content-type header or a method header. 
static __always_inline grpc_status_t scan_headers(const struct __sk_buff *skb, skb_info_t *skb_info, __u32 frame_length) { - field_index idx; + field_index_t idx; grpc_status_t status = PAYLOAD_UNDETERMINED; __u32 frame_end = skb_info->data_off + frame_length; @@ -145,7 +145,7 @@ static __always_inline grpc_status_t scan_headers(const struct __sk_buff *skb, s static __always_inline grpc_status_t is_grpc(const struct __sk_buff *skb, const skb_info_t *skb_info) { grpc_status_t status = PAYLOAD_UNDETERMINED; char frame_buf[HTTP2_FRAME_HEADER_SIZE]; - struct http2_frame current_frame; + http2_frame_t current_frame; frame_info_t frames[GRPC_MAX_FRAMES_TO_PROCESS]; u32 frames_count = 0; diff --git a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h index ede2777ab5d7c..4ab868169b72a 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding-defs.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding-defs.h @@ -5,7 +5,19 @@ #include "protocols/http2/defs.h" -#define HTTP2_FRAMES_PER_TAIL_CALL 7 +// Represents the maximum number of frames we'll process in a single tail call in `handle_eos_frames` program. +#define HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL 200 +// Represents the maximum number of tail calls to process EOS frames. +// Currently we have up to 120 frames in a packet, thus 1 tail call is enough. +#define HTTP2_MAX_TAIL_CALLS_FOR_EOS_PARSER 1 +#define HTTP2_MAX_FRAMES_FOR_EOS_PARSER (HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL * HTTP2_MAX_TAIL_CALLS_FOR_EOS_PARSER) + +// Represents the maximum number of frames we'll process in a single tail call in `handle_headers_frames` program. +#define HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL 18 +// Represents the maximum number of tail calls to process headers frames. +// Currently we have up to 120 frames in a packet, thus 7 (7*18 = 126) tail calls is enough. 
+#define HTTP2_MAX_TAIL_CALLS_FOR_HEADERS_PARSER 7 +#define HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER (HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL * HTTP2_MAX_TAIL_CALLS_FOR_HEADERS_PARSER) // Maximum number of frames to be processed in a single TCP packet. That's also the number of tail calls we'll have. // NOTE: we may need to revisit this const if we need to capture more connections. #define HTTP2_MAX_FRAMES_ITERATIONS 120 @@ -131,14 +143,14 @@ typedef struct { } http2_header_t; typedef struct { - struct http2_frame frame; + http2_frame_t frame; __u32 offset; } http2_frame_with_offset; typedef struct { + __u16 iteration; + __u16 frames_count; http2_frame_with_offset frames_array[HTTP2_MAX_FRAMES_ITERATIONS] __attribute__((aligned(8))); - __u8 iteration; - __u8 frames_count; } http2_tail_call_state_t; typedef struct { diff --git a/pkg/network/ebpf/c/protocols/http2/decoding.h b/pkg/network/ebpf/c/protocols/http2/decoding.h index 4fcabfe89cf5d..c123fefa209c7 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding.h @@ -353,7 +353,7 @@ static __always_inline void handle_end_of_stream(http2_stream_t *current_stream, bpf_map_delete_elem(&http2_in_flight, http2_stream_key_template); } -static __always_inline void process_headers_frame(struct __sk_buff *skb, http2_stream_t *current_stream, skb_info_t *skb_info, conn_tuple_t *tup, dynamic_table_index_t *dynamic_index, struct http2_frame *current_frame_header, http2_telemetry_t *http2_tel) { +static __always_inline void process_headers_frame(struct __sk_buff *skb, http2_stream_t *current_stream, skb_info_t *skb_info, conn_tuple_t *tup, dynamic_table_index_t *dynamic_index, http2_frame_t *current_frame_header, http2_telemetry_t *http2_tel) { const __u32 zero = 0; // Allocating an array of headers, to hold all interesting headers from the frame. 
@@ -367,46 +367,10 @@ static __always_inline void process_headers_frame(struct __sk_buff *skb, http2_s process_headers(skb, dynamic_index, current_stream, headers_to_process, interesting_headers, http2_tel); } -static __always_inline void parse_frame(struct __sk_buff *skb, skb_info_t *skb_info, conn_tuple_t *tup, http2_ctx_t *http2_ctx, struct http2_frame *current_frame, http2_telemetry_t *http2_tel) { - http2_ctx->http2_stream_key.stream_id = current_frame->stream_id; - http2_stream_t *current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); - if (current_stream == NULL) { - return; - } - - if (current_frame->type == kHeadersFrame) { - process_headers_frame(skb, current_stream, skb_info, tup, &http2_ctx->dynamic_index, current_frame, http2_tel); - } - - // When we accept an RST, it means that the current stream is terminated. - // See: https://datatracker.ietf.org/doc/html/rfc7540#section-6.4 - bool is_rst = current_frame->type == kRSTStreamFrame; - // If rst, and stream is empty (no status code, or no response) then delete from inflight - if (is_rst && (current_stream->response_status_code == 0 || current_stream->request_started == 0)) { - bpf_map_delete_elem(&http2_in_flight, &http2_ctx->http2_stream_key); - return; - } - - bool should_handle_end_of_stream = false; - if (is_rst) { - __sync_fetch_and_add(&http2_tel->end_of_stream_rst, 1); - should_handle_end_of_stream = true; - } else if ((current_frame->flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM) { - __sync_fetch_and_add(&http2_tel->end_of_stream, 1); - should_handle_end_of_stream = true; - } - - if (should_handle_end_of_stream) { - handle_end_of_stream(current_stream, &http2_ctx->http2_stream_key, http2_tel); - } - - return; -} - // A similar implementation of read_http2_frame_header, but instead of getting both a char array and an out parameter, -// we get only the out parameter (equals to struct http2_frame * representation of the char array) and we perform the +// we get only the out 
parameter (equals to http2_frame_t* representation of the char array) and we perform the // field adjustments we have in read_http2_frame_header. -static __always_inline bool format_http2_frame_header(struct http2_frame *out) { +static __always_inline bool format_http2_frame_header(http2_frame_t *out) { if (is_empty_frame_header((char *)out)) { return false; } @@ -465,11 +429,11 @@ static __always_inline void fix_header_frame(struct __sk_buff *skb, skb_info_t * return; } -static __always_inline void reset_frame(struct http2_frame *out) { - *out = (struct http2_frame){ 0 }; +static __always_inline void reset_frame(http2_frame_t *out) { + *out = (http2_frame_t){ 0 }; } -static __always_inline bool get_first_frame(struct __sk_buff *skb, skb_info_t *skb_info, frame_header_remainder_t *frame_state, struct http2_frame *current_frame, http2_telemetry_t *http2_tel) { +static __always_inline bool get_first_frame(struct __sk_buff *skb, skb_info_t *skb_info, frame_header_remainder_t *frame_state, http2_frame_t *current_frame, http2_telemetry_t *http2_tel) { // No state, try reading a frame. if (frame_state == NULL) { // Checking we have enough bytes in the packet to read a frame header. 
@@ -550,16 +514,16 @@ static __always_inline bool get_first_frame(struct __sk_buff *skb, skb_info_t *s // - HEADERS frames // - RST_STREAM frames // - DATA frames with the END_STREAM flag set -static __always_inline __u8 find_relevant_frames(struct __sk_buff *skb, skb_info_t *skb_info, http2_frame_with_offset *frames_array, __u8 original_index, http2_telemetry_t *http2_tel) { +static __always_inline __u16 find_relevant_frames(struct __sk_buff *skb, skb_info_t *skb_info, http2_frame_with_offset *frames_array, __u8 original_index, http2_telemetry_t *http2_tel) { bool is_headers_or_rst_frame, is_data_end_of_stream; - struct http2_frame current_frame = {}; + http2_frame_t current_frame = {}; // We may have found a relevant frame already in http2_handle_first_frame, // so we need to adjust the index accordingly. We do not set // interesting_frame_index to original_index directly, as this will confuse // the verifier, leading it into thinking the index could have an arbitrary // value. - __u8 interesting_frame_index = original_index == 1; + __u16 interesting_frame_index = original_index == 1; __u32 iteration = 0; #pragma unroll(HTTP2_MAX_FRAMES_TO_FILTER) @@ -603,7 +567,7 @@ static __always_inline __u8 find_relevant_frames(struct __sk_buff *skb, skb_info SEC("socket/http2_handle_first_frame") int socket__http2_handle_first_frame(struct __sk_buff *skb) { const __u32 zero = 0; - struct http2_frame current_frame = {}; + http2_frame_t current_frame = {}; dispatcher_arguments_t dispatcher_args_copy; bpf_memset(&dispatcher_args_copy, 0, sizeof(dispatcher_arguments_t)); @@ -714,9 +678,7 @@ int socket__http2_filter(struct __sk_buff *skb) { // We have a remainder new_frame_state.remainder = local_skb_info.data_off - local_skb_info.data_end; bpf_map_update_elem(&http2_remainder, &dispatcher_args_copy.tup, &new_frame_state, BPF_ANY); - } - - if (local_skb_info.data_off < local_skb_info.data_end && local_skb_info.data_off + HTTP2_FRAME_HEADER_SIZE > local_skb_info.data_end) { + } 
else if (local_skb_info.data_off < local_skb_info.data_end && local_skb_info.data_off + HTTP2_FRAME_HEADER_SIZE > local_skb_info.data_end) { // We have a frame header remainder new_frame_state.remainder = HTTP2_FRAME_HEADER_SIZE - (local_skb_info.data_end - local_skb_info.data_off); bpf_memset(new_frame_state.buf, 0, HTTP2_FRAME_HEADER_SIZE); @@ -735,14 +697,20 @@ int socket__http2_filter(struct __sk_buff *skb) { // We have couple of interesting headers, launching tail calls to handle them. if (bpf_map_update_elem(&http2_iterations, &dispatcher_args_copy, iteration_value, BPF_NOEXIST) >= 0) { // We managed to cache the iteration_value in the http2_iterations map. - bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_FRAME_PARSER); + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_HEADERS_PARSER); } return 0; } -SEC("socket/http2_frames_parser") -int socket__http2_frames_parser(struct __sk_buff *skb) { +// The program is responsible for parsing all headers frames. For each headers frame we parse the headers, +// fill the dynamic table with the new interesting literal headers, and modify the streams accordingly. +// The program can be called multiple times (via "self call" of tail calls) in case we have more frames to parse +// than the maximum number of frames we can process in a single tail call. +// The program is being called after socket__http2_filter, and it is being called only if we have interesting frames. +// The program calls socket__http2_eos_parser to finalize the streams and enqueue them to be sent to the user mode.
+SEC("socket/http2_headers_parser") +int socket__http2_headers_parser(struct __sk_buff *skb) { dispatcher_arguments_t dispatcher_args_copy; bpf_memset(&dispatcher_args_copy, 0, sizeof(dispatcher_arguments_t)); if (!fetch_dispatching_arguments(&dispatcher_args_copy.tup, &dispatcher_args_copy.skb_info)) { @@ -784,8 +752,10 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { normalize_tuple(&http2_ctx->http2_stream_key.tup); http2_ctx->dynamic_index.tup = dispatcher_args_copy.tup; - #pragma unroll(HTTP2_FRAMES_PER_TAIL_CALL) - for (__u8 index = 0; index < HTTP2_FRAMES_PER_TAIL_CALL; index++) { + http2_stream_t *current_stream = NULL; + + #pragma unroll(HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL) + for (__u16 index = 0; index < HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER_PER_TAIL_CALL; index++) { if (tail_call_state->iteration >= HTTP2_MAX_FRAMES_ITERATIONS) { break; } @@ -797,14 +767,27 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { } tail_call_state->iteration += 1; - dispatcher_args_copy.skb_info.data_off = current_frame.offset; + if (current_frame.frame.type != kHeadersFrame) { + continue; + } - parse_frame(skb, &dispatcher_args_copy.skb_info, &dispatcher_args_copy.tup, http2_ctx, ¤t_frame.frame, http2_tel); + http2_ctx->http2_stream_key.stream_id = current_frame.frame.stream_id; + current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); + if (current_stream == NULL) { + continue; + } + dispatcher_args_copy.skb_info.data_off = current_frame.offset; + process_headers_frame(skb, current_stream, &dispatcher_args_copy.skb_info, &dispatcher_args_copy.tup, &http2_ctx->dynamic_index, ¤t_frame.frame, http2_tel); } - if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && tail_call_state->iteration < tail_call_state->frames_count) { - bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_FRAME_PARSER); + if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && + tail_call_state->iteration < tail_call_state->frames_count && + 
tail_call_state->iteration < HTTP2_MAX_FRAMES_FOR_HEADERS_PARSER) { + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_HEADERS_PARSER); } + // Zeroing the iteration index to call EOS parser + tail_call_state->iteration = 0; + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_EOS_PARSER); delete_iteration: // restoring the original value. @@ -814,4 +797,103 @@ int socket__http2_frames_parser(struct __sk_buff *skb) { return 0; } +// The program is responsible for parsing all frames that mark the end of a stream. +// We consider a frame as marking the end of a stream if it is either: +// - A headers or data frame with the END_STREAM flag set. +// - An RST_STREAM frame. +// The program is being called after socket__http2_headers_parser, and it finalizes the streams and enqueues them +// to be sent to the user mode. +// The program is ready to be called multiple times (via "self call" of tail calls) in case we have more frames to +// process than the maximum number of frames we can process in a single tail call. +SEC("socket/http2_eos_parser") +int socket__http2_eos_parser(struct __sk_buff *skb) { + dispatcher_arguments_t dispatcher_args_copy; + bpf_memset(&dispatcher_args_copy, 0, sizeof(dispatcher_arguments_t)); + if (!fetch_dispatching_arguments(&dispatcher_args_copy.tup, &dispatcher_args_copy.skb_info)) { + return 0; + } + + // A single packet can contain multiple HTTP/2 frames, due to instruction limitations we have divided the + // processing into multiple tail calls, where each tail call processes a single frame. We must have context when + // we are processing the frames, for example, to know how many bytes we have read in the packet, or if we have reached + // the maximum number of frames we can process. For that we are checking if the iteration context already exists.
+ // If not, creating a new one to be used for further processing + http2_tail_call_state_t *tail_call_state = bpf_map_lookup_elem(&http2_iterations, &dispatcher_args_copy); + if (tail_call_state == NULL) { + // We didn't find the cached context, aborting. + return 0; + } + + const __u32 zero = 0; + http2_telemetry_t *http2_tel = bpf_map_lookup_elem(&http2_telemetry, &zero); + if (http2_tel == NULL) { + goto delete_iteration; + } + + http2_frame_with_offset *frames_array = tail_call_state->frames_array; + http2_frame_with_offset current_frame; + + http2_ctx_t *http2_ctx = bpf_map_lookup_elem(&http2_ctx_heap, &zero); + if (http2_ctx == NULL) { + goto delete_iteration; + } + bpf_memset(http2_ctx, 0, sizeof(http2_ctx_t)); + http2_ctx->http2_stream_key.tup = dispatcher_args_copy.tup; + normalize_tuple(&http2_ctx->http2_stream_key.tup); + + bool is_rst = false, is_end_of_stream = false; + http2_stream_t *current_stream = NULL; + + #pragma unroll(HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL) + for (__u16 index = 0; index < HTTP2_MAX_FRAMES_FOR_EOS_PARSER_PER_TAIL_CALL; index++) { + if (tail_call_state->iteration >= HTTP2_MAX_FRAMES_ITERATIONS) { + break; + } + + current_frame = frames_array[tail_call_state->iteration]; + // Having this condition after assignment and not before is due to a verifier issue. + if (tail_call_state->iteration >= tail_call_state->frames_count) { + break; + } + tail_call_state->iteration += 1; + + is_rst = current_frame.frame.type == kRSTStreamFrame; + is_end_of_stream = (current_frame.frame.flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM; + if (!is_rst && !is_end_of_stream) { + continue; + } + + http2_ctx->http2_stream_key.stream_id = current_frame.frame.stream_id; + current_stream = http2_fetch_stream(&http2_ctx->http2_stream_key); + if (current_stream == NULL) { + continue; + } + + // When we accept an RST, it means that the current stream is terminated. 
+ // See: https://datatracker.ietf.org/doc/html/rfc7540#section-6.4 + // If rst, and stream is empty (no status code, or no response) then delete from inflight + if (is_rst && (current_stream->response_status_code == 0 || current_stream->request_started == 0)) { + bpf_map_delete_elem(&http2_in_flight, &http2_ctx->http2_stream_key); + continue; + } + + if (is_rst) { + __sync_fetch_and_add(&http2_tel->end_of_stream_rst, 1); + } else if ((current_frame.frame.flags & HTTP2_END_OF_STREAM) == HTTP2_END_OF_STREAM) { + __sync_fetch_and_add(&http2_tel->end_of_stream, 1); + } + handle_end_of_stream(current_stream, &http2_ctx->http2_stream_key, http2_tel); + } + + if (tail_call_state->iteration < HTTP2_MAX_FRAMES_ITERATIONS && + tail_call_state->iteration < tail_call_state->frames_count && + tail_call_state->iteration < HTTP2_MAX_FRAMES_FOR_EOS_PARSER) { + bpf_tail_call_compat(skb, &protocols_progs, PROG_HTTP2_EOS_PARSER); + } + +delete_iteration: + bpf_map_delete_elem(&http2_iterations, &dispatcher_args_copy); + + return 0; +} #endif diff --git a/pkg/network/ebpf/c/protocols/http2/defs.h b/pkg/network/ebpf/c/protocols/http2/defs.h index a3a510ca05b1f..a0a17f24c7bbe 100644 --- a/pkg/network/ebpf/c/protocols/http2/defs.h +++ b/pkg/network/ebpf/c/protocols/http2/defs.h @@ -31,13 +31,13 @@ typedef enum { // Struct which represent the http2 frame by its fields. // Checkout https://datatracker.ietf.org/doc/html/rfc7540#section-4.1 for frame format. 
-struct http2_frame { +typedef struct { __u32 length : 24; frame_type_t type; __u8 flags; __u8 reserved : 1; __u32 stream_id : 31; -} __attribute__ ((packed)); +} __attribute__ ((packed)) http2_frame_t; /* Header parsing helper macros */ @@ -59,13 +59,13 @@ typedef union { __u8 reserved : 2; } __attribute__((packed)) literal; __u8 raw; -} __attribute__((packed)) field_index; +} __attribute__((packed)) field_index_t; // string_literal_header represents the length of a string as represented in HPACK // (see RFC 7541: 5.2 String Literal Representation). typedef struct { __u8 length : 7; __u8 is_huffman : 1; -} __attribute__((packed)) string_literal_header; +} __attribute__((packed)) string_literal_header_t; #endif diff --git a/pkg/network/ebpf/c/protocols/http2/helpers.h b/pkg/network/ebpf/c/protocols/http2/helpers.h index 447a5414ca231..be7fb98833528 100644 --- a/pkg/network/ebpf/c/protocols/http2/helpers.h +++ b/pkg/network/ebpf/c/protocols/http2/helpers.h @@ -13,7 +13,7 @@ static __always_inline bool is_empty_frame_header(const char *frame) { } // This function reads the http2 frame header and validate the frame. -static __always_inline bool read_http2_frame_header(const char *buf, size_t buf_size, struct http2_frame *out) { +static __always_inline bool read_http2_frame_header(const char *buf, size_t buf_size, http2_frame_t *out) { if (buf == NULL) { return false; } @@ -28,7 +28,7 @@ static __always_inline bool read_http2_frame_header(const char *buf, size_t buf_ // We extract the frame by its shape to fields. 
// See: https://datatracker.ietf.org/doc/html/rfc7540#section-4.1 - *out = *((struct http2_frame*)buf); + *out = *((http2_frame_t*)buf); out->length = bpf_ntohl(out->length << 8); out->stream_id = bpf_ntohl(out->stream_id << 1); @@ -54,7 +54,7 @@ static __always_inline bool is_http2_preface(const char* buf, __u32 buf_size) { static __always_inline bool is_http2_server_settings(const char* buf, __u32 buf_size) { CHECK_PRELIMINARY_BUFFER_CONDITIONS(buf, buf_size, HTTP2_FRAME_HEADER_SIZE); - struct http2_frame frame_header; + http2_frame_t frame_header; if (!read_http2_frame_header(buf, buf_size, &frame_header)) { return false; } diff --git a/pkg/network/protocols/ebpf.go b/pkg/network/protocols/ebpf.go index dc8b94b1dfa0f..b7b082f34df72 100644 --- a/pkg/network/protocols/ebpf.go +++ b/pkg/network/protocols/ebpf.go @@ -37,8 +37,10 @@ const ( ProgramHTTP2HandleFirstFrame ProgramType = C.PROG_HTTP2_HANDLE_FIRST_FRAME // ProgramHTTP2FrameFilter is the Golang representation of the C.PROG_HTTP2_HANDLE_FRAME enum ProgramHTTP2FrameFilter ProgramType = C.PROG_HTTP2_FRAME_FILTER - // ProgramHTTP2FrameParser is the Golang representation of the C.PROG_HTTP2_FRAME_PARSER enum - ProgramHTTP2FrameParser ProgramType = C.PROG_HTTP2_FRAME_PARSER + // ProgramHTTP2HeadersParser is the Golang representation of the C.PROG_HTTP2_HEADERS_PARSER enum + ProgramHTTP2HeadersParser ProgramType = C.PROG_HTTP2_HEADERS_PARSER + // ProgramHTTP2EOSParser is the Golang representation of the C.PROG_HTTP2_EOS_PARSER enum + ProgramHTTP2EOSParser ProgramType = C.PROG_HTTP2_EOS_PARSER // ProgramKafka is the Golang representation of the C.PROG_KAFKA enum ProgramKafka ProgramType = C.PROG_KAFKA ) diff --git a/pkg/network/protocols/grpc/monitor_test.go b/pkg/network/protocols/grpc/monitor_test.go index 63a579a337179..7e10f080614bf 100644 --- a/pkg/network/protocols/grpc/monitor_test.go +++ b/pkg/network/protocols/grpc/monitor_test.go @@ -513,17 +513,16 @@ func (s *USMgRPCSuite) TestLargeBodiesGRPCScenarios() 
{ }, time.Second*5, time.Millisecond*100, "%v != %v", res, tt.expectedEndpoints) if t.Failed() { - o, err := monitor.DumpMaps("http2_in_flight") + tlw := &ebpftest.TestLogWriter{T: t} + + err := monitor.DumpMaps(tlw, "http2_in_flight") if err != nil { t.Logf("failed dumping http2_in_flight: %s", err) - } else { - t.Log(o) } - o, err = monitor.DumpMaps("http2_dynamic_table") + + err = monitor.DumpMaps(tlw, "http2_dynamic_table") if err != nil { t.Logf("failed dumping http2_dynamic_table: %s", err) - } else { - t.Log(o) } } }) diff --git a/pkg/network/protocols/http/protocol.go b/pkg/network/protocols/http/protocol.go index f4e0106fa2421..683898caa4106 100644 --- a/pkg/network/protocols/http/protocol.go +++ b/pkg/network/protocols/http/protocol.go @@ -9,7 +9,7 @@ package http import ( "fmt" - "strings" + "io" "unsafe" "github.com/cilium/ebpf" @@ -153,14 +153,14 @@ func (p *protocol) Stop(_ *manager.Manager) { } } -func (p *protocol) DumpMaps(output *strings.Builder, mapName string, currentMap *ebpf.Map) { +func (p *protocol) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) { if mapName == inFlightMap { // maps/http_in_flight (BPF_MAP_TYPE_HASH), key ConnTuple, value httpTX - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'httpTX'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 'httpTX'\n") iter := currentMap.Iterate() var key netebpf.ConnTuple var value EbpfTx for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } } } diff --git a/pkg/network/protocols/http2/protocol.go b/pkg/network/protocols/http2/protocol.go index d8522c6fd6743..e85d9bb8c8950 100644 --- a/pkg/network/protocols/http2/protocol.go +++ b/pkg/network/protocols/http2/protocol.go @@ -10,7 +10,7 @@ package http2 import ( "errors" "fmt" - "strings" + "io" "time" "unsafe" @@ -56,7 +56,8 @@ const ( staticTable = "http2_static_table" firstFrameHandlerTailCall = 
"socket__http2_handle_first_frame" filterTailCall = "socket__http2_filter" - parserTailCall = "socket__http2_frames_parser" + headersParserTailCall = "socket__http2_headers_parser" + eosParserTailCall = "socket__http2_eos_parser" eventStream = "http2" telemetryMap = "http2_telemetry" ) @@ -110,9 +111,16 @@ var Spec = &protocols.ProtocolSpec{ }, { ProgArrayName: protocols.ProtocolDispatcherProgramsMap, - Key: uint32(protocols.ProgramHTTP2FrameParser), + Key: uint32(protocols.ProgramHTTP2HeadersParser), ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: parserTailCall, + EBPFFuncName: headersParserTailCall, + }, + }, + { + ProgArrayName: protocols.ProtocolDispatcherProgramsMap, + Key: uint32(protocols.ProgramHTTP2EOSParser), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: eosParserTailCall, }, }, }, @@ -275,22 +283,22 @@ func (p *Protocol) Stop(_ *manager.Manager) { // DumpMaps dumps the content of the map represented by mapName & // currentMap, if it used by the eBPF program, to output. 
-func (p *Protocol) DumpMaps(output *strings.Builder, mapName string, currentMap *ebpf.Map) { +func (p *Protocol) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) { if mapName == inFlightMap { // maps/http2_in_flight (BPF_MAP_TYPE_HASH), key ConnTuple, value httpTX - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'httpTX'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 'httpTX'\n") iter := currentMap.Iterate() var key http2StreamKey var value EbpfTx for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } } else if mapName == dynamicTable { - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'httpTX'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 'httpTX'\n") iter := currentMap.Iterate() var key http2DynamicTableIndex var value http2DynamicTableEntry for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } } } diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go index b4ea5bcb1d365..3cc9649b39f80 100644 --- a/pkg/network/protocols/kafka/protocol.go +++ b/pkg/network/protocols/kafka/protocol.go @@ -8,7 +8,7 @@ package kafka import ( - "strings" + "io" manager "github.com/DataDog/ebpf-manager" "github.com/cilium/ebpf" @@ -129,7 +129,7 @@ func (p *protocol) Stop(*manager.Manager) { } // DumpMaps empty implementation. 
-func (p *protocol) DumpMaps(*strings.Builder, string, *ebpf.Map) {} +func (p *protocol) DumpMaps(io.Writer, string, *ebpf.Map) {} func (p *protocol) processKafka(events []EbpfTx) { for i := range events { diff --git a/pkg/network/protocols/protocols.go b/pkg/network/protocols/protocols.go index ecb98fee5ce41..4b277fe46d243 100644 --- a/pkg/network/protocols/protocols.go +++ b/pkg/network/protocols/protocols.go @@ -8,7 +8,7 @@ package protocols import ( - "strings" + "io" manager "github.com/DataDog/ebpf-manager" "github.com/cilium/ebpf" @@ -50,7 +50,7 @@ type Protocol interface { // DumpMaps dumps the content of the map represented by mapName & // currentMap, if it used by the eBPF program, to output. - DumpMaps(output *strings.Builder, mapName string, currentMap *ebpf.Map) + DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) // Name returns the protocol name. Name() string diff --git a/pkg/network/tracer/connection/dump.go b/pkg/network/tracer/connection/dump.go index 92c2422d0cbd2..29327a99e5cff 100644 --- a/pkg/network/tracer/connection/dump.go +++ b/pkg/network/tracer/connection/dump.go @@ -9,7 +9,7 @@ package connection import ( - "strings" + "io" "unsafe" "github.com/cilium/ebpf" @@ -24,157 +24,156 @@ import ( ) //nolint:revive // TODO(NET) Fix revive linter -func dumpMapsHandler(manager *manager.Manager, mapName string, currentMap *ebpf.Map) string { - var output strings.Builder +func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, currentMap *ebpf.Map) { switch mapName { case "connectsock_ipv6": // maps/connectsock_ipv6 (BPF_MAP_TYPE_HASH), key C.__u64, value uintptr // C.void* - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'uintptr // C.void*'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'uintptr // C.void*'\n") iter := currentMap.Iterate() var key uint64 var value uintptr // C.void* for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - 
output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.TracerStatusMap: // maps/tracer_status (BPF_MAP_TYPE_HASH), key C.__u64, value tracerStatus - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'tracerStatus'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'tracerStatus'\n") iter := currentMap.Iterate() var key uint64 var value offsetguess.TracerStatus for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.ConntrackStatusMap: // maps/conntrack_status (BPF_MAP_TYPE_HASH), key C.__u64, value conntrackStatus - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'conntrackStatus'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'conntrackStatus'\n") iter := currentMap.Iterate() var key uint64 var value offsetguess.ConntrackStatus for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.ConntrackMap: // maps/conntrack (BPF_MAP_TYPE_HASH), key ConnTuple, value ConnTuple - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'ConnTuple'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 'ConnTuple'\n") iter := currentMap.Iterate() var key ddebpf.ConnTuple var value ddebpf.ConnTuple for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.ConntrackTelemetryMap: // maps/conntrack_telemetry (BPF_MAP_TYPE_ARRAY), key C.u32, value conntrackTelemetry - output.WriteString("Map: '" + mapName + "', key: 'C.u32', value: 'conntrackTelemetry'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.u32', value: 'conntrackTelemetry'\n") var zero uint64 telemetry := &ddebpf.ConntrackTelemetry{} if err := currentMap.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(telemetry)); err != 
nil { log.Tracef("error retrieving the contrack telemetry struct: %s", err) } - output.WriteString(spew.Sdump(telemetry)) + spew.Fdump(w, telemetry) case probes.SockFDLookupArgsMap: // maps/sockfd_lookup_args (BPF_MAP_TYPE_HASH), key C.__u64, value C.__u32 - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.__u32'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.__u32'\n") iter := currentMap.Iterate() var key uint64 var value uint32 for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.SockByPidFDMap: // maps/sock_by_pid_fd (BPF_MAP_TYPE_HASH), key C.pid_fd_t, value uintptr // C.struct sock* - output.WriteString("Map: '" + mapName + "', key: 'C.pid_fd_t', value: 'uintptr // C.struct sock*'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.pid_fd_t', value: 'uintptr // C.struct sock*'\n") iter := currentMap.Iterate() var key ddebpf.PIDFD var value uintptr // C.struct sock* for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.PidFDBySockMap: // maps/pid_fd_by_sock (BPF_MAP_TYPE_HASH), key uintptr // C.struct sock*, value C.pid_fd_t - output.WriteString("Map: '" + mapName + "', key: 'uintptr // C.struct sock*', value: 'C.pid_fd_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'uintptr // C.struct sock*', value: 'C.pid_fd_t'\n") iter := currentMap.Iterate() var key uintptr // C.struct sock* var value ddebpf.PIDFD for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.ConnMap: // maps/conn_stats (BPF_MAP_TYPE_HASH), key ConnTuple, value ConnStatsWithTimestamp - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'ConnStatsWithTimestamp'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 
'ConnStatsWithTimestamp'\n") iter := currentMap.Iterate() var key ddebpf.ConnTuple var value ddebpf.ConnStats for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.TCPStatsMap: // maps/tcp_stats (BPF_MAP_TYPE_HASH), key ConnTuple, value TCPStats - output.WriteString("Map: '" + mapName + "', key: 'ConnTuple', value: 'TCPStats'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'ConnTuple', value: 'TCPStats'\n") iter := currentMap.Iterate() var key ddebpf.ConnTuple var value ddebpf.TCPStats for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.ConnCloseBatchMap: // maps/conn_close_batch (BPF_MAP_TYPE_HASH), key C.__u32, value batch - output.WriteString("Map: '" + mapName + "', key: 'C.__u32', value: 'batch'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u32', value: 'batch'\n") iter := currentMap.Iterate() var key uint32 var value ddebpf.Batch for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "udp_recv_sock": // maps/udp_recv_sock (BPF_MAP_TYPE_HASH), key C.__u64, value C.udp_recv_sock_t - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.udp_recv_sock_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.udp_recv_sock_t'\n") iter := currentMap.Iterate() var key uint64 var value ddebpf.UDPRecvSock for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "udpv6_recv_sock": // maps/udpv6_recv_sock (BPF_MAP_TYPE_HASH), key C.__u64, value C.udp_recv_sock_t - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.udp_recv_sock_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.udp_recv_sock_t'\n") iter := currentMap.Iterate() 
var key uint64 var value ddebpf.UDPRecvSock for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.PortBindingsMap: // maps/port_bindings (BPF_MAP_TYPE_HASH), key portBindingTuple, value C.__u8 - output.WriteString("Map: '" + mapName + "', key: 'portBindingTuple', value: 'C.__u8'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'portBindingTuple', value: 'C.__u8'\n") iter := currentMap.Iterate() var key ddebpf.PortBinding var value uint8 for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.UDPPortBindingsMap: // maps/udp_port_bindings (BPF_MAP_TYPE_HASH), key portBindingTuple, value C.__u8 - output.WriteString("Map: '" + mapName + "', key: 'portBindingTuple', value: 'C.__u8'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'portBindingTuple', value: 'C.__u8'\n") iter := currentMap.Iterate() var key ddebpf.PortBinding var value uint8 for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "pending_bind": // maps/pending_bind (BPF_MAP_TYPE_HASH), key C.__u64, value C.bind_syscall_args_t - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.bind_syscall_args_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.bind_syscall_args_t'\n") iter := currentMap.Iterate() var key uint64 var value ddebpf.BindSyscallArgs for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case probes.TelemetryMap: // maps/telemetry (BPF_MAP_TYPE_ARRAY), key C.u32, value kernelTelemetry - output.WriteString("Map: '" + mapName + "', key: 'C.u32', value: 'kernelTelemetry'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.u32', value: 'kernelTelemetry'\n") var zero uint64 telemetry := 
&ddebpf.Telemetry{} if err := currentMap.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(telemetry)); err != nil { @@ -182,8 +181,6 @@ func dumpMapsHandler(manager *manager.Manager, mapName string, currentMap *ebpf. // so let's just use a trace log log.Tracef("error retrieving the telemetry struct: %s", err) } - output.WriteString(spew.Sdump(telemetry)) + spew.Fdump(w, telemetry) } - - return output.String() } diff --git a/pkg/network/tracer/connection/tracer.go b/pkg/network/tracer/connection/tracer.go index cf8538be4abdd..8d80221fcc758 100644 --- a/pkg/network/tracer/connection/tracer.go +++ b/pkg/network/tracer/connection/tracer.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "hash" + "io" "math" "sync" "time" @@ -73,7 +74,7 @@ type Tracer interface { // An individual tracer implementation may choose which maps to expose via this function. GetMap(string) *ebpf.Map // DumpMaps (for debugging purpose) returns all maps content by default or selected maps from maps parameter. - DumpMaps(maps ...string) (string, error) + DumpMaps(w io.Writer, maps ...string) error // Type returns the type of the underlying ebpf tracer that is currently loaded Type() TracerType @@ -554,8 +555,8 @@ func (t *tracer) Collect(ch chan<- prometheus.Metric) { } // DumpMaps (for debugging purpose) returns all maps content by default or selected maps from maps parameter. -func (t *tracer) DumpMaps(maps ...string) (string, error) { - return t.m.DumpMaps(maps...) +func (t *tracer) DumpMaps(w io.Writer, maps ...string) error { + return t.m.DumpMaps(w, maps...) 
} // Type returns the type of the underlying ebpf tracer that is currently loaded diff --git a/pkg/network/tracer/conntracker_test.go b/pkg/network/tracer/conntracker_test.go index bcdc0531eaf8b..1ca924fd64f76 100644 --- a/pkg/network/tracer/conntracker_test.go +++ b/pkg/network/tracer/conntracker_test.go @@ -34,6 +34,7 @@ const ( ) func TestConntrackers(t *testing.T) { + ebpftest.LogLevel(t, "trace") t.Run("netlink", func(t *testing.T) { runConntrackerTest(t, "netlink", setupNetlinkConntracker) }) diff --git a/pkg/network/tracer/offsetguess/conntrack.go b/pkg/network/tracer/offsetguess/conntrack.go index 9bc13098d2745..cb1e74db152dd 100644 --- a/pkg/network/tracer/offsetguess/conntrack.go +++ b/pkg/network/tracer/offsetguess/conntrack.go @@ -145,6 +145,7 @@ func (c *conntrackOffsetGuesser) checkAndUpdateCurrentOffset(mp *ebpf.Map, expec case GuessCtTupleOrigin: c.status.Offset_origin, overlapped = skipOverlaps(c.status.Offset_origin, c.nfConnRanges()) if overlapped { + log.Tracef("offset %v overlaps with another field, skipping", whatString[GuessWhat(c.status.What)]) // adjusted offset from eBPF overlapped with another field, we need to check new offset break } @@ -155,11 +156,14 @@ func (c *conntrackOffsetGuesser) checkAndUpdateCurrentOffset(mp *ebpf.Map, expec c.logAndAdvance(c.status.Offset_origin, GuessCtTupleReply) break } + log.Tracef("%v %d does not match expected %d, incrementing offset %d", + whatString[GuessWhat(c.status.What)], c.status.Saddr, expected.saddr, c.status.Offset_origin) c.status.Offset_origin++ c.status.Offset_origin, _ = skipOverlaps(c.status.Offset_origin, c.nfConnRanges()) case GuessCtTupleReply: c.status.Offset_reply, overlapped = skipOverlaps(c.status.Offset_reply, c.nfConnRanges()) if overlapped { + log.Tracef("offset %v overlaps with another field, skipping", whatString[GuessWhat(c.status.What)]) // adjusted offset from eBPF overlapped with another field, we need to check new offset break } @@ -168,11 +172,14 @@ func (c 
*conntrackOffsetGuesser) checkAndUpdateCurrentOffset(mp *ebpf.Map, expec c.logAndAdvance(c.status.Offset_reply, GuessCtNet) break } + log.Tracef("%v %d does not match expected %d, incrementing offset %d", + whatString[GuessWhat(c.status.What)], c.status.Saddr, expected.daddr, c.status.Offset_reply) c.status.Offset_reply++ c.status.Offset_reply, _ = skipOverlaps(c.status.Offset_reply, c.nfConnRanges()) case GuessCtNet: c.status.Offset_netns, overlapped = skipOverlaps(c.status.Offset_netns, c.nfConnRanges()) if overlapped { + log.Tracef("offset %v overlaps with another field, skipping", whatString[GuessWhat(c.status.What)]) // adjusted offset from eBPF overlapped with another field, we need to check new offset break } @@ -181,6 +188,8 @@ func (c *conntrackOffsetGuesser) checkAndUpdateCurrentOffset(mp *ebpf.Map, expec c.logAndAdvance(c.status.Offset_netns, GuessNotApplicable) return c.setReadyState(mp) } + log.Tracef("%v %d does not match expected %d, incrementing offset %d", + whatString[GuessWhat(c.status.What)], c.status.Netns, expected.netns, c.status.Offset_netns) c.status.Offset_netns++ c.status.Offset_netns, _ = skipOverlaps(c.status.Offset_netns, c.nfConnRanges()) default: diff --git a/pkg/network/tracer/offsetguess_test.go b/pkg/network/tracer/offsetguess_test.go index 5baf4c1aae62a..5a51dfe0b6fef 100644 --- a/pkg/network/tracer/offsetguess_test.go +++ b/pkg/network/tracer/offsetguess_test.go @@ -126,6 +126,7 @@ func (o offsetT) String() string { } func TestOffsetGuess(t *testing.T) { + ebpftest.LogLevel(t, "trace") ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", testOffsetGuess) } @@ -156,6 +157,7 @@ func testOffsetGuess(t *testing.T) { consts := map[offsetT]uint64{} for _, c := range _consts { value := c.Value.(uint64) + t.Logf("Guessed offset %v with value %v", c.Name, value) switch c.Name { case "offset_saddr": consts[offsetSaddr] = value diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index b50064d6a6f9f..b395f0e2909f1 
100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -11,6 +11,7 @@ import ( "context" "errors" "fmt" + "io" "sync" "time" @@ -712,19 +713,22 @@ func (t *Tracer) DebugNetworkMaps() (*network.Connections, error) { // DebugEBPFMaps returns all maps registered in the eBPF manager // //nolint:revive // TODO(NET) Fix revive linter -func (t *Tracer) DebugEBPFMaps(maps ...string) (string, error) { - tracerMaps, err := t.ebpfTracer.DumpMaps(maps...) +func (t *Tracer) DebugEBPFMaps(w io.Writer, maps ...string) error { + io.WriteString(w, "tracer:\n") + err := t.ebpfTracer.DumpMaps(w, maps...) if err != nil { - return "", err + return err } - if t.usmMonitor == nil { - return "tracer:\n" + tracerMaps, nil - } - usmMaps, err := t.usmMonitor.DumpMaps(maps...) - if err != nil { - return "", err + + if t.usmMonitor != nil { + io.WriteString(w, "usm_monitor:\n") + err := t.usmMonitor.DumpMaps(w, maps...) + if err != nil { + return err + } } - return "tracer:\n" + tracerMaps + "\nhttp_monitor:\n" + usmMaps, nil + + return nil } // connectionExpired returns true if the passed in connection has expired diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index bbbff519c971d..8d4aac590cf58 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -40,6 +40,7 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" rc "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/config/sysctl" @@ -382,6 +383,7 @@ func (s *TracerSuite) TestConnectionExpirationRegression() { func (s *TracerSuite) TestConntrackExpiration() { t := s.T() + ebpftest.LogLevel(t, "trace") netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} @@ 
-2027,6 +2029,7 @@ func (s *TracerSuite) TestGetHelpersTelemetry() { } func TestEbpfConntrackerFallback(t *testing.T) { + ebpftest.LogLevel(t, "trace") type testCase struct { enableRuntimeCompiler bool allowPrecompiledFallback bool @@ -2102,6 +2105,7 @@ func TestEbpfConntrackerFallback(t *testing.T) { } func TestConntrackerFallback(t *testing.T) { + ebpftest.LogLevel(t, "trace") cfg := testConfig() cfg.EnableEbpfConntracker = false cfg.AllowNetlinkConntrackerFallback = true diff --git a/pkg/network/tracer/tracer_unsupported.go b/pkg/network/tracer/tracer_unsupported.go index d2216f6ff514a..9501c46495228 100644 --- a/pkg/network/tracer/tracer_unsupported.go +++ b/pkg/network/tracer/tracer_unsupported.go @@ -10,6 +10,7 @@ package tracer import ( "context" + "io" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network" @@ -53,8 +54,8 @@ func (t *Tracer) DebugNetworkMaps() (*network.Connections, error) { } // DebugEBPFMaps is not implemented on this OS for Tracer -func (t *Tracer) DebugEBPFMaps(maps ...string) (string, error) { //nolint:revive // TODO fix revive unused-parameter - return "", ebpf.ErrNotImplemented +func (t *Tracer) DebugEBPFMaps(_ io.Writer, _ ...string) error { + return ebpf.ErrNotImplemented } // DebugCachedConntrack is not implemented on this OS for Tracer diff --git a/pkg/network/tracer/tracer_usm_linux_test.go b/pkg/network/tracer/tracer_usm_linux_test.go index b433c8929df11..1554e847c804f 100644 --- a/pkg/network/tracer/tracer_usm_linux_test.go +++ b/pkg/network/tracer/tracer_usm_linux_test.go @@ -334,8 +334,8 @@ func testHTTPSLibrary(t *testing.T, tr *Tracer, fetchCmd, prefetchLibs []string) }, 5*time.Second, 100*time.Millisecond, "couldn't find USM HTTPS stats") if t.Failed() { - o, _ := tr.usmMonitor.DumpMaps("http_in_flight") - t.Logf("http_in_flight: %s", o) + t.Log("http_in_flight: ") + tr.usmMonitor.DumpMaps(&ebpftest.TestLogWriter{T: t}, "http_in_flight") } // check NPM static TLS tag diff --git 
a/pkg/network/tracer/tracer_windows.go b/pkg/network/tracer/tracer_windows.go index 16fb0a6988a83..702254753fa04 100644 --- a/pkg/network/tracer/tracer_windows.go +++ b/pkg/network/tracer/tracer_windows.go @@ -11,6 +11,7 @@ import ( "context" "errors" "fmt" + "io" "runtime" "sync" "syscall" @@ -239,8 +240,8 @@ func (t *Tracer) DebugNetworkMaps() (*network.Connections, error) { // DebugEBPFMaps is not implemented on this OS for Tracer // //nolint:revive // TODO(WKIT) Fix revive linter -func (t *Tracer) DebugEBPFMaps(maps ...string) (string, error) { - return "", ebpf.ErrNotImplemented +func (t *Tracer) DebugEBPFMaps(_ io.Writer, _ ...string) error { + return ebpf.ErrNotImplemented } // DebugCachedConntrack is not implemented on this OS for Tracer diff --git a/pkg/network/tracer/utils_linux.go b/pkg/network/tracer/utils_linux.go index 176256bbb8803..2477b60ea38a1 100644 --- a/pkg/network/tracer/utils_linux.go +++ b/pkg/network/tracer/utils_linux.go @@ -8,7 +8,13 @@ package tracer import ( + "errors" "fmt" + "strings" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -52,5 +58,28 @@ func verifyOSVersion(kernelCode kernel.Version, platform string, exclusionList [ if platform == "ubuntu" && kernelCode >= kernel.VersionCode(4, 4, 114) && kernelCode <= kernel.VersionCode(4, 4, 127) { return false, fmt.Errorf("Known bug for kernel %s on platform %s, see: \n- https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1763454", kernelCode, platform) } - return true, nil + + var requiredFuncs = []asm.BuiltinFunc{ + asm.FnMapLookupElem, + asm.FnMapUpdateElem, + asm.FnMapDeleteElem, + asm.FnPerfEventOutput, + asm.FnPerfEventRead, + } + var missingFuncs []string + for _, rf := range requiredFuncs { + if err := features.HaveProgramHelper(ebpf.Kprobe, rf); err != nil { + if errors.Is(err, ebpf.ErrNotSupported) { + missingFuncs = 
append(missingFuncs, rf.String()) + } else { + return false, fmt.Errorf("error checking for ebpf helper %s support: %w", rf.String(), err) + } + } + } + if len(missingFuncs) == 0 { + return true, nil + } + errMsg := fmt.Sprintf("Kernel unsupported (%s) - ", kernelCode) + errMsg += fmt.Sprintf("required functions missing: %s", strings.Join(missingFuncs, ", ")) + return false, fmt.Errorf(errMsg) } diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index 7ebea3a4d6f60..96e8c19842f02 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -11,11 +11,11 @@ import ( "debug/elf" "errors" "fmt" + "io" "os" "path/filepath" "regexp" "strconv" - "strings" "sync" "time" "unsafe" @@ -263,7 +263,7 @@ func (p *goTLSProgram) PostStart(_ *manager.Manager) error { return nil } -func (p *goTLSProgram) DumpMaps(_ *strings.Builder, _ string, _ *ebpf.Map) {} +func (p *goTLSProgram) DumpMaps(_ io.Writer, _ string, _ *ebpf.Map) {} func (p *goTLSProgram) GetStats() *protocols.ProtocolStats { return nil diff --git a/pkg/network/usm/ebpf_javatls.go b/pkg/network/usm/ebpf_javatls.go index 4687f0cd22f64..01803f015939b 100644 --- a/pkg/network/usm/ebpf_javatls.go +++ b/pkg/network/usm/ebpf_javatls.go @@ -10,6 +10,7 @@ package usm import ( "bytes" "fmt" + "io" "math/rand" "os" "path/filepath" @@ -279,7 +280,7 @@ func (p *javaTLSProgram) Stop(*manager.Manager) { } } -func (p *javaTLSProgram) DumpMaps(*strings.Builder, string, *ebpf.Map) {} +func (p *javaTLSProgram) DumpMaps(io.Writer, string, *ebpf.Map) {} func (p *javaTLSProgram) GetStats() *protocols.ProtocolStats { return nil diff --git a/pkg/network/usm/ebpf_main.go b/pkg/network/usm/ebpf_main.go index f41c7ea6f7762..c096bb44ec032 100644 --- a/pkg/network/usm/ebpf_main.go +++ b/pkg/network/usm/ebpf_main.go @@ -10,8 +10,8 @@ package usm import ( "errors" "fmt" + "io" "math" - "strings" "time" "unsafe" @@ -435,25 +435,22 @@ func getAssetName(module string, debug bool) string { return 
fmt.Sprintf("%s.o", module) } -func (e *ebpfProgram) dumpMapsHandler(_ *manager.Manager, mapName string, currentMap *ebpf.Map) string { - var output strings.Builder - +func (e *ebpfProgram) dumpMapsHandler(w io.Writer, _ *manager.Manager, mapName string, currentMap *ebpf.Map) { switch mapName { case connectionStatesMap: // maps/connection_states (BPF_MAP_TYPE_HASH), key C.conn_tuple_t, value C.__u32 - output.WriteString("Map: '" + mapName + "', key: 'C.conn_tuple_t', value: 'C.__u32'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.conn_tuple_t', value: 'C.__u32'\n") iter := currentMap.Iterate() var key http.ConnTuple var value uint32 for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } default: // Go through enabled protocols in case one of them now how to handle the current map for _, p := range e.enabledProtocols { - p.Instance.DumpMaps(&output, mapName, currentMap) + p.Instance.DumpMaps(w, mapName, currentMap) } } - return output.String() } func (e *ebpfProgram) getProtocolStats() map[protocols.ProtocolType]interface{} { diff --git a/pkg/network/usm/ebpf_ssl.go b/pkg/network/usm/ebpf_ssl.go index 350fa3e719bb7..d3c8be80babd8 100644 --- a/pkg/network/usm/ebpf_ssl.go +++ b/pkg/network/usm/ebpf_ssl.go @@ -11,6 +11,7 @@ import ( "bytes" "debug/elf" "fmt" + "io" "os" "path/filepath" "regexp" @@ -505,51 +506,51 @@ func (o *sslProgram) Stop(*manager.Manager) { o.istioMonitor.Stop() } -func (o *sslProgram) DumpMaps(output *strings.Builder, mapName string, currentMap *ebpf.Map) { +func (o *sslProgram) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) { switch mapName { case sslSockByCtxMap: // maps/ssl_sock_by_ctx (BPF_MAP_TYPE_HASH), key uintptr // C.void *, value C.ssl_sock_t - output.WriteString("Map: '" + mapName + "', key: 'uintptr // C.void *', value: 'C.ssl_sock_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'uintptr // C.void *', value: 'C.ssl_sock_t'\n") iter 
:= currentMap.Iterate() var key uintptr // C.void * var value http.SslSock for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "ssl_read_args": // maps/ssl_read_args (BPF_MAP_TYPE_HASH), key C.__u64, value C.ssl_read_args_t - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.ssl_read_args_t'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.ssl_read_args_t'\n") iter := currentMap.Iterate() var key uint64 var value http.SslReadArgs for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "bio_new_socket_args": // maps/bio_new_socket_args (BPF_MAP_TYPE_HASH), key C.__u64, value C.__u32 - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'C.__u32'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'C.__u32'\n") iter := currentMap.Iterate() var key uint64 var value uint32 for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "fd_by_ssl_bio": // maps/fd_by_ssl_bio (BPF_MAP_TYPE_HASH), key C.__u32, value uintptr // C.void * - output.WriteString("Map: '" + mapName + "', key: 'C.__u32', value: 'uintptr // C.void *'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u32', value: 'uintptr // C.void *'\n") iter := currentMap.Iterate() var key uint32 var value uintptr // C.void * for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } case "ssl_ctx_by_pid_tgid": // maps/ssl_ctx_by_pid_tgid (BPF_MAP_TYPE_HASH), key C.__u64, value uintptr // C.void * - output.WriteString("Map: '" + mapName + "', key: 'C.__u64', value: 'uintptr // C.void *'\n") + io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u64', value: 'uintptr // C.void *'\n") iter := currentMap.Iterate() var 
key uint64 var value uintptr // C.void * for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { - output.WriteString(spew.Sdump(key, value)) + spew.Fdump(w, key, value) } } diff --git a/pkg/network/usm/monitor.go b/pkg/network/usm/monitor.go index dc027f36b20ab..dc3dcb7656688 100644 --- a/pkg/network/usm/monitor.go +++ b/pkg/network/usm/monitor.go @@ -10,6 +10,7 @@ package usm import ( "errors" "fmt" + "io" "syscall" "time" @@ -193,6 +194,6 @@ func (m *Monitor) Stop() { } // DumpMaps dumps the maps associated with the monitor -func (m *Monitor) DumpMaps(maps ...string) (string, error) { - return m.ebpfProgram.DumpMaps(maps...) +func (m *Monitor) DumpMaps(w io.Writer, maps ...string) error { + return m.ebpfProgram.DumpMaps(w, maps...) } diff --git a/pkg/network/usm/monitor_test.go b/pkg/network/usm/monitor_test.go index 47531d7ca7b78..5507ca2eb7b63 100644 --- a/pkg/network/usm/monitor_test.go +++ b/pkg/network/usm/monitor_test.go @@ -811,11 +811,9 @@ func (s *USMHTTP2Suite) TestSimpleHTTP2() { t.Logf("key: %v was not found in res", key.Path.Content.Get()) } } - o, err := monitor.DumpMaps("http2_in_flight") + err := monitor.DumpMaps(&ebpftest.TestLogWriter{T: t}, "http2_in_flight") if err != nil { t.Logf("failed dumping http2_in_flight: %s", err) - } else { - t.Log(o) } } }) @@ -1006,11 +1004,9 @@ func assertAllRequestsExists(t *testing.T, monitor *Monitor, requests []*nethttp }, 3*time.Second, time.Millisecond*100, "connection not found") if t.Failed() { - o, err := monitor.DumpMaps("http_in_flight") + err := monitor.DumpMaps(&ebpftest.TestLogWriter{T: t}, "http_in_flight") if err != nil { t.Logf("failed dumping http_in_flight: %s", err) - } else { - t.Log(o) } for reqIndex, exists := range requestsExist { diff --git a/pkg/network/usm/monitor_testutil.go b/pkg/network/usm/monitor_testutil.go index a91359ec71bd8..b64b792b79b0f 100644 --- a/pkg/network/usm/monitor_testutil.go +++ b/pkg/network/usm/monitor_testutil.go @@ -8,7 +8,7 @@ package usm import ( - 
"strings" + "io" "testing" "github.com/cilium/ebpf" @@ -66,8 +66,8 @@ func (p *protocolMock) Stop(mgr *manager.Manager) { } } -func (p *protocolMock) DumpMaps(*strings.Builder, string, *ebpf.Map) {} -func (p *protocolMock) GetStats() *protocols.ProtocolStats { return nil } +func (p *protocolMock) DumpMaps(io.Writer, string, *ebpf.Map) {} +func (p *protocolMock) GetStats() *protocols.ProtocolStats { return nil } // IsBuildModeSupported returns always true, as java tls module is supported by all modes. func (*protocolMock) IsBuildModeSupported(buildmode.Type) bool { return true } diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index 336d14fab237a..474ef98fce7d5 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/DataDog/datadog-go/v5 v5.1.1 - github.com/DataDog/go-sqllexer v0.0.8 + github.com/DataDog/go-sqllexer v0.0.9 github.com/outcaste-io/ristretto v0.2.1 github.com/stretchr/testify v1.8.4 go.uber.org/atomic v1.10.0 diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index ecaa8334abdb9..8f60e065253ad 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= -github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= diff --git 
a/pkg/obfuscate/obfuscate.go b/pkg/obfuscate/obfuscate.go index f0789eb04a075..ed30de603b494 100644 --- a/pkg/obfuscate/obfuscate.go +++ b/pkg/obfuscate/obfuscate.go @@ -165,6 +165,16 @@ type SQLConfig struct { // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". KeepPositionalParameter bool `json:"keep_positional_parameter" yaml:"keep_positional_parameter"` + // KeepTrailingSemicolon specifies whether to keep trailing semicolon. + // By default, trailing semicolon is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"` + + // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table]. + // By default, identifier quotation is removed during normalization. + // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize". + KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"` + // Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations. 
Cache bool } diff --git a/pkg/obfuscate/sql.go b/pkg/obfuscate/sql.go index cf30343abedfe..807c9fb5f6a89 100644 --- a/pkg/obfuscate/sql.go +++ b/pkg/obfuscate/sql.go @@ -458,6 +458,8 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca sqllexer.WithCollectProcedures(opts.CollectProcedures), sqllexer.WithKeepSQLAlias(opts.KeepSQLAlias), sqllexer.WithRemoveSpaceBetweenParentheses(opts.RemoveSpaceBetweenParentheses), + sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon), + sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation), ) out, statementMetadata, err := sqllexer.ObfuscateAndNormalize( in, diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index d8aa1a1521703..0ca3b39ff63e9 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -2135,6 +2135,8 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { keepNull bool keepBoolean bool keepPositionalParameter bool + keepTrailingSemicolon bool + keepIdentifierQuotation bool metadata SQLMetadata }{ { @@ -2344,6 +2346,36 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { Procedures: []string{}, }, }, + { + name: "normalization with keep trailing semicolon", + query: "SELECT * FROM users WHERE id = 1 AND name = 'test';", + expected: "SELECT * FROM users WHERE id = ? AND name = ?;", + keepTrailingSemicolon: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, + { + name: "normalization with keep identifier quotation", + query: `SELECT * FROM "users" WHERE id = 1 AND name = 'test'`, + expected: `SELECT * FROM "users" WHERE id = ? 
AND name = ?`, + keepIdentifierQuotation: true, + metadata: SQLMetadata{ + Size: 11, + TablesCSV: "users", + Commands: []string{ + "SELECT", + }, + Comments: []string{}, + Procedures: []string{}, + }, + }, } for _, tt := range tests { @@ -2362,6 +2394,8 @@ func TestSQLLexerObfuscationAndNormalization(t *testing.T) { KeepBoolean: tt.keepBoolean, KeepPositionalParameter: tt.keepPositionalParameter, RemoveSpaceBetweenParentheses: tt.removeSpaceBetweenParentheses, + KeepTrailingSemicolon: tt.keepTrailingSemicolon, + KeepIdentifierQuotation: tt.keepIdentifierQuotation, }, }).ObfuscateSQLString(tt.query) require.NoError(t, err) diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index 5a6d88539c7f8..3a723c84a1ad7 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -47,6 +47,8 @@ type RuntimeSecurityConfig struct { PolicyMonitorEnabled bool // PolicyMonitorPerRuleEnabled enabled per-rule policy monitoring PolicyMonitorPerRuleEnabled bool + // PolicyMonitorReportInternalPolicies enable internal policies monitoring + PolicyMonitorReportInternalPolicies bool // SocketPath is the path to the socket that is used to communicate with the security agent SocketPath string // EventServerBurst defines the maximum burst of events that can be sent over the grpc server @@ -257,10 +259,11 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { RemoteConfigurationEnabled: isRemoteConfigEnabled(), // policy & ruleset - PoliciesDir: coreconfig.SystemProbe.GetString("runtime_security_config.policies.dir"), - WatchPoliciesDir: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.watch_dir"), - PolicyMonitorEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.monitor.enabled"), - PolicyMonitorPerRuleEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), + PoliciesDir: coreconfig.SystemProbe.GetString("runtime_security_config.policies.dir"), + 
WatchPoliciesDir: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.watch_dir"), + PolicyMonitorEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.monitor.enabled"), + PolicyMonitorPerRuleEnabled: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), + PolicyMonitorReportInternalPolicies: coreconfig.SystemProbe.GetBool("runtime_security_config.policies.monitor.report_internal_policies"), LogPatterns: coreconfig.SystemProbe.GetStringSlice("runtime_security_config.log_patterns"), LogTags: coreconfig.SystemProbe.GetStringSlice("runtime_security_config.log_tags"), diff --git a/pkg/security/probe/probe_epbfless.go b/pkg/security/probe/probe_epbfless.go index 3bec17a96917e..2bca7a9ef4a93 100644 --- a/pkg/security/probe/probe_epbfless.go +++ b/pkg/security/probe/probe_epbfless.go @@ -96,6 +96,7 @@ func (p *EBPFLessProbe) handleClientMsg(msg *clientMsg) { p.Resolvers.ProcessResolver.AddForkEntry(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: syscallMsg.NSID}, syscallMsg.Fork.PPID) case ebpfless.SyscallTypeOpen: event.Type = uint32(model.FileOpenEventType) + event.Open.Retval = syscallMsg.Retval event.Open.File.PathnameStr = syscallMsg.Open.Filename event.Open.File.BasenameStr = filepath.Base(syscallMsg.Open.Filename) event.Open.Flags = syscallMsg.Open.Flags diff --git a/pkg/security/probe/selftests/tester_linux.go b/pkg/security/probe/selftests/tester_linux.go index 280e510efb2a4..7e4de6b8d9189 100644 --- a/pkg/security/probe/selftests/tester_linux.go +++ b/pkg/security/probe/selftests/tester_linux.go @@ -14,6 +14,8 @@ import ( "go.uber.org/atomic" + "github.com/hashicorp/go-multierror" + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/proto/api" @@ -22,7 +24,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" 
"github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/hashicorp/go-multierror" ) // EventPredicate defines a self test event validation predicate @@ -160,9 +161,10 @@ func (t *SelfTester) Close() error { // LoadPolicies implements the PolicyProvider interface func (t *SelfTester) LoadPolicies(_ []rules.MacroFilter, _ []rules.RuleFilter) ([]*rules.Policy, *multierror.Error) { p := &rules.Policy{ - Name: policyName, - Source: policySource, - Version: policyVersion, + Name: policyName, + Source: policySource, + Version: policyVersion, + IsInternal: true, } for _, selftest := range FileSelfTests { diff --git a/pkg/security/probe/selftests/tester_others.go b/pkg/security/probe/selftests/tester_others.go index fa57a2d9427c7..ece2a0c99b714 100644 --- a/pkg/security/probe/selftests/tester_others.go +++ b/pkg/security/probe/selftests/tester_others.go @@ -8,12 +8,13 @@ package selftests import ( + "github.com/hashicorp/go-multierror" + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/serializers" - "github.com/hashicorp/go-multierror" ) // SelfTester represents all the state needed to conduct rule injection test at startup @@ -44,9 +45,10 @@ func (t *SelfTester) Close() error { // LoadPolicies implements the PolicyProvider interface func (t *SelfTester) LoadPolicies(_ []rules.MacroFilter, _ []rules.RuleFilter) ([]*rules.Policy, *multierror.Error) { p := &rules.Policy{ - Name: policyName, - Source: policySource, - Version: policyVersion, + Name: policyName, + Source: policySource, + Version: policyVersion, + IsInternal: true, } return []*rules.Policy{p}, nil diff --git a/pkg/security/proto/ebpfless/msg.go b/pkg/security/proto/ebpfless/msg.go index 37f7c2c990135..f4acbada21c1e 
100644 --- a/pkg/security/proto/ebpfless/msg.go +++ b/pkg/security/proto/ebpfless/msg.go @@ -103,6 +103,7 @@ type SyscallMsg struct { NSID uint64 Type SyscallType PID uint32 + Retval int64 ContainerContext *ContainerContext Exec *ExecSyscallMsg Open *OpenSyscallMsg diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 134314a8463fc..e7831301a5044 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -402,6 +402,10 @@ func checkEntryPoint(path string) (string, error) { return name, nil } +func isAcceptedRetval(retval int64) bool { + return retval < 0 && retval != -int64(syscall.EACCES) && retval != -int64(syscall.EPERM) +} + // StartCWSPtracer start the ptracer func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) error { entry, err := checkEntryPoint(args[0]) @@ -632,11 +636,12 @@ func StartCWSPtracer(args []string, probeAddr string, creds Creds, verbose bool) case ExecveNr, ExecveatNr: send(process.Nr[nr]) case OpenNr, OpenatNr: - if ret := tracer.ReadRet(regs); ret >= 0 { + if ret := tracer.ReadRet(regs); !isAcceptedRetval(ret) { msg, exists := process.Nr[nr] if !exists { return } + msg.Retval = ret send(msg) diff --git a/pkg/security/rules/bundled_policy_provider.go b/pkg/security/rules/bundled_policy_provider.go index 776036494bd8e..df2f8302d3417 100644 --- a/pkg/security/rules/bundled_policy_provider.go +++ b/pkg/security/rules/bundled_policy_provider.go @@ -7,22 +7,36 @@ package rules import ( + "github.com/hashicorp/go-multierror" + + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/version" - "github.com/hashicorp/go-multierror" ) // BundledPolicyProvider specify the policy provider for bundled policies -type BundledPolicyProvider struct{} +type BundledPolicyProvider struct { + cfg *config.RuntimeSecurityConfig +} + +// NewBundledPolicyProvider returns a new bundled policy provider 
+func NewBundledPolicyProvider(cfg *config.RuntimeSecurityConfig) *BundledPolicyProvider { + return &BundledPolicyProvider{ + cfg: cfg, + } +} // LoadPolicies implements the PolicyProvider interface func (p *BundledPolicyProvider) LoadPolicies([]rules.MacroFilter, []rules.RuleFilter) ([]*rules.Policy, *multierror.Error) { + bundledPolicyRules := newBundledPolicyRules(p.cfg) + policy := &rules.Policy{} policy.Name = "bundled_policy" policy.Source = "bundled" policy.Version = version.AgentVersion policy.Rules = bundledPolicyRules + policy.IsInternal = true for _, rule := range bundledPolicyRules { rule.Policy = policy diff --git a/pkg/security/rules/bundled_policy_provider_linux.go b/pkg/security/rules/bundled_policy_provider_linux.go index 042d46983516c..e8d0ab5b5bc4d 100644 --- a/pkg/security/rules/bundled_policy_provider_linux.go +++ b/pkg/security/rules/bundled_policy_provider_linux.go @@ -7,15 +7,21 @@ package rules import ( + "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var bundledPolicyRules = []*rules.RuleDefinition{{ - ID: events.RefreshUserCacheRuleID, - Expression: `rename.file.destination.path in [ "/etc/passwd", "/etc/group" ]`, - Actions: []rules.ActionDefinition{{ - InternalCallbackDefinition: &rules.InternalCallbackDefinition{}, - }}, - Silent: true, -}} +func newBundledPolicyRules(cfg *config.RuntimeSecurityConfig) []*rules.RuleDefinition { + if cfg.EBPFLessEnabled { + return []*rules.RuleDefinition{} + } + return []*rules.RuleDefinition{{ + ID: events.RefreshUserCacheRuleID, + Expression: `rename.file.destination.path in [ "/etc/passwd", "/etc/group" ]`, + Actions: []rules.ActionDefinition{{ + InternalCallbackDefinition: &rules.InternalCallbackDefinition{}, + }}, + Silent: true, + }} +} diff --git a/pkg/security/rules/bundled_policy_provider_other.go b/pkg/security/rules/bundled_policy_provider_other.go index 
57718ba0c72ff..996356885dccf 100644 --- a/pkg/security/rules/bundled_policy_provider_other.go +++ b/pkg/security/rules/bundled_policy_provider_other.go @@ -8,6 +8,11 @@ // Package rules holds rules related files package rules -import "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +import ( + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) -var bundledPolicyRules = []*rules.RuleDefinition{} +func newBundledPolicyRules(_ *config.RuntimeSecurityConfig) []*rules.RuleDefinition { + return []*rules.RuleDefinition{} +} diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index c9cbca6dff07c..068f9a82afe3c 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -11,6 +11,7 @@ import ( json "encoding/json" "errors" "fmt" + "runtime" "sync" "time" @@ -94,6 +95,16 @@ func NewRuleEngine(evm *eventmonitor.EventMonitor, config *config.RuntimeSecurit return engine, nil } +func getOrigin(cfg *config.RuntimeSecurityConfig) string { + if runtime.GOOS == "linux" { + if cfg.EBPFLessEnabled { + return "ebpfless" + } + return "ebpf" + } + return "" +} + // Start the rule engine func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg *sync.WaitGroup) error { // monitor policies @@ -118,7 +129,7 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * ruleFilters = append(ruleFilters, agentVersionFilter) } - ruleFilterModel, err := NewRuleFilterModel() + ruleFilterModel, err := NewRuleFilterModel(getOrigin(e.config)) if err != nil { return fmt.Errorf("failed to create rule filter: %w", err) } @@ -316,12 +327,12 @@ func (e *RuleEngine) LoadPolicies(providers []rules.PolicyProvider, sendLoadedRe } - policies := monitor.NewPoliciesState(evaluationSet.RuleSets, loadErrs) + policies := monitor.NewPoliciesState(evaluationSet.RuleSets, loadErrs, e.config.PolicyMonitorReportInternalPolicies) e.notifyAPIServer(ruleIDs, 
policies) if sendLoadedReport { monitor.ReportRuleSetLoaded(e.eventSender, e.statsdClient, policies) - e.policyMonitor.SetPolicies(evaluationSet.GetPolicies(), loadErrs) + e.policyMonitor.SetPolicies(policies) } return nil @@ -335,7 +346,7 @@ func (e *RuleEngine) notifyAPIServer(ruleIDs []rules.RuleID, policies []*monitor func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { var policyProviders []rules.PolicyProvider - policyProviders = append(policyProviders, &BundledPolicyProvider{}) + policyProviders = append(policyProviders, NewBundledPolicyProvider(e.config)) // add remote config as config provider if enabled. if e.config.RemoteConfigurationEnabled { @@ -520,6 +531,8 @@ func logLoadingErrors(msg string, m *multierror.Error) { } else { seclog.Warnf(msg, rErr.Error()) } + } else { + seclog.Errorf(msg, err.Error()) } } } diff --git a/pkg/security/rules/monitor/policy_monitor.go b/pkg/security/rules/monitor/policy_monitor.go index 421fe537828bd..036c8c7e67205 100644 --- a/pkg/security/rules/monitor/policy_monitor.go +++ b/pkg/security/rules/monitor/policy_monitor.go @@ -35,55 +35,56 @@ const ( policyMetricRate = 30 * time.Second ) -// Policy describes policy related information -type Policy struct { - Name string - Source string - Version string +// policy describes policy related information +type policy struct { + name string + source string + version string } -// RuleStatus defines status of rules -type RuleStatus = map[eval.RuleID]string +// ruleStatus defines status of rules +type ruleStatus = map[eval.RuleID]string // PolicyMonitor defines a policy monitor type PolicyMonitor struct { sync.RWMutex statsdClient statsd.ClientInterface - policies map[string]Policy - rules RuleStatus + policies []*policy + rules ruleStatus perRuleMetricEnabled bool } -// SetPolicies add policies to the monitor -func (p *PolicyMonitor) SetPolicies(policies []*rules.Policy, mErrs *multierror.Error) { - p.Lock() - defer p.Unlock() +// SetPolicies sets the 
policies to monitor +func (pm *PolicyMonitor) SetPolicies(policies []*PolicyState) { + pm.Lock() + defer pm.Unlock() - p.policies = map[string]Policy{} - - for _, policy := range policies { - p.policies[policy.Name] = Policy{Name: policy.Name, Source: policy.Source, Version: policy.Version} + pm.policies = make([]*policy, 0, len(policies)) + if pm.perRuleMetricEnabled { + pm.rules = make(ruleStatus) + } - for _, rule := range policy.Rules { - p.rules[rule.ID] = "loaded" - } + for _, p := range policies { + pm.policies = append(pm.policies, &policy{ + name: p.Name, + source: p.Source, + version: p.Version, + }) - if mErrs != nil && mErrs.Errors != nil { - for _, err := range mErrs.Errors { - if rerr, ok := err.(*rules.ErrRuleLoad); ok { - p.rules[rerr.Definition.ID] = string(rerr.Type()) - } + if pm.perRuleMetricEnabled { + for _, rule := range p.Rules { + pm.rules[eval.RuleID(rule.ID)] = rule.Status } } } } // ReportHeartbeatEvent sends HeartbeatEvents reporting the current set of policies -func (p *PolicyMonitor) ReportHeartbeatEvent(sender events.EventSender) { - p.RLock() - rule, events := NewHeartbeatEvents(p.policies) - p.RUnlock() +func (pm *PolicyMonitor) ReportHeartbeatEvent(sender events.EventSender) { + pm.RLock() + rule, events := newHeartbeatEvents(pm.policies) + pm.RUnlock() for _, event := range events { sender.SendEvent(rule, event, nil, "") @@ -91,7 +92,7 @@ func (p *PolicyMonitor) ReportHeartbeatEvent(sender events.EventSender) { } // Start the monitor -func (p *PolicyMonitor) Start(ctx context.Context) { +func (pm *PolicyMonitor) Start(ctx context.Context) { go func() { timerMetric := time.NewTicker(policyMetricRate) defer timerMetric.Stop() @@ -102,34 +103,34 @@ func (p *PolicyMonitor) Start(ctx context.Context) { return case <-timerMetric.C: - p.RLock() - for _, policy := range p.policies { + pm.RLock() + for _, p := range pm.policies { tags := []string{ - "policy_name:" + policy.Name, - "policy_source:" + policy.Source, - "policy_version:" + 
policy.Version, + "policy_name:" + p.name, + "policy_source:" + p.source, + "policy_version:" + p.version, "agent_version:" + version.AgentVersion, } - if err := p.statsdClient.Gauge(metrics.MetricPolicy, 1, tags, 1.0); err != nil { + if err := pm.statsdClient.Gauge(metrics.MetricPolicy, 1, tags, 1.0); err != nil { log.Error(fmt.Errorf("failed to send policy metric: %w", err)) } } - if p.perRuleMetricEnabled { - for id, status := range p.rules { + if pm.perRuleMetricEnabled { + for id, status := range pm.rules { tags := []string{ "rule_id:" + id, fmt.Sprintf("status:%v", status), constants.CardinalityTagPrefix + collectors.LowCardinalityString, } - if err := p.statsdClient.Gauge(metrics.MetricRulesStatus, 1, tags, 1.0); err != nil { + if err := pm.statsdClient.Gauge(metrics.MetricRulesStatus, 1, tags, 1.0); err != nil { log.Error(fmt.Errorf("failed to send policy metric: %w", err)) } } } - p.RUnlock() + pm.RUnlock() } } }() @@ -139,8 +140,6 @@ func (p *PolicyMonitor) Start(ctx context.Context) { func NewPolicyMonitor(statsdClient statsd.ClientInterface, perRuleMetricEnabled bool) *PolicyMonitor { return &PolicyMonitor{ statsdClient: statsdClient, - policies: make(map[string]Policy), - rules: make(map[string]string), perRuleMetricEnabled: perRuleMetricEnabled, } } @@ -151,9 +150,9 @@ type RuleSetLoadedReport struct { Event *events.CustomEvent } -// ReportRuleSetLoaded reports to Datadog that new ruleset was loaded +// ReportRuleSetLoaded reports to Datadog that a new ruleset was loaded func ReportRuleSetLoaded(sender events.EventSender, statsdClient statsd.ClientInterface, policies []*PolicyState) { - rule, event := NewRuleSetLoadedEvent(policies) + rule, event := newRuleSetLoadedEvent(policies) if err := statsdClient.Count(metrics.MetricRuleSetLoaded, 1, []string{}, 1.0); err != nil { log.Error(fmt.Errorf("failed to send ruleset_loaded metric: %w", err)) @@ -228,7 +227,7 @@ func RuleStateFromDefinition(def *rules.RuleDefinition, status string, message s } // 
NewPoliciesState returns the states of policies and rules -func NewPoliciesState(ruleSets map[string]*rules.RuleSet, err *multierror.Error) []*PolicyState { +func NewPoliciesState(ruleSets map[string]*rules.RuleSet, err *multierror.Error, includeInternalPolicies bool) []*PolicyState { mp := make(map[string]*PolicyState) var policyState *PolicyState @@ -236,6 +235,10 @@ func NewPoliciesState(ruleSets map[string]*rules.RuleSet, err *multierror.Error) for _, rs := range ruleSets { for _, rule := range rs.GetRules() { + if rule.Definition.Policy.IsInternal && !includeInternalPolicies { + continue + } + ruleDef := rule.Definition policyName := ruleDef.Policy.Name @@ -251,6 +254,9 @@ func NewPoliciesState(ruleSets map[string]*rules.RuleSet, err *multierror.Error) if err != nil && err.Errors != nil { for _, err := range err.Errors { if rerr, ok := err.(*rules.ErrRuleLoad); ok { + if rerr.Definition.Policy.IsInternal && !includeInternalPolicies { + continue + } policyName := rerr.Definition.Policy.Name if _, exists := mp[policyName]; !exists { @@ -272,8 +278,8 @@ func NewPoliciesState(ruleSets map[string]*rules.RuleSet, err *multierror.Error) return policies } -// NewRuleSetLoadedEvent returns the rule (e.g. ruleset_loaded) and a populated custom event for a new_rules_loaded event -func NewRuleSetLoadedEvent(policies []*PolicyState) (*rules.Rule, *events.CustomEvent) { +// newRuleSetLoadedEvent returns the rule (e.g. ruleset_loaded) and a populated custom event for a new_rules_loaded event +func newRuleSetLoadedEvent(policies []*PolicyState) (*rules.Rule, *events.CustomEvent) { evt := RulesetLoadedEvent{ Policies: policies, } @@ -283,15 +289,15 @@ func NewRuleSetLoadedEvent(policies []*PolicyState) (*rules.Rule, *events.Custom events.NewCustomEvent(model.CustomRulesetLoadedEventType, evt) } -// NewHeartbeatEvents returns the rule (e.g. 
heartbeat) and a populated custom event for a heartbeat event -func NewHeartbeatEvents(policies map[string]Policy) (*rules.Rule, []*events.CustomEvent) { +// newHeartbeatEvents returns the rule (e.g. heartbeat) and a populated custom event for a heartbeat event +func newHeartbeatEvents(policies []*policy) (*rules.Rule, []*events.CustomEvent) { var evts []*events.CustomEvent for _, policy := range policies { var policyState = PolicyState{ - Name: policy.Name, - Version: policy.Version, - Source: policy.Source, + Name: policy.name, + Version: policy.version, + Source: policy.source, Rules: nil, // The rules that have been loaded at startup are not reported in the heartbeat event } diff --git a/pkg/security/rules/monitor/policy_monitor_easyjson.go b/pkg/security/rules/monitor/policy_monitor_easyjson.go index 7494866f9165e..d03c2f281c6e6 100644 --- a/pkg/security/rules/monitor/policy_monitor_easyjson.go +++ b/pkg/security/rules/monitor/policy_monitor_easyjson.go @@ -17,7 +17,7 @@ var ( _ easyjson.Marshaler ) -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(in *jlexer.Lexer, out *RulesetLoadedEvent) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(in *jlexer.Lexer, out *RulesetLoadedEvent) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -83,7 +83,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(in *jlex in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(out *jwriter.Writer, in RulesetLoadedEvent) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(out *jwriter.Writer, in RulesetLoadedEvent) { out.RawByte('{') first := true _ = first @@ -122,14 +122,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(out *jwr // MarshalEasyJSON supports easyjson.Marshaler interface func (v RulesetLoadedEvent) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *RulesetLoadedEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(in *jlexer.Lexer, out *RuleState) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(in *jlexer.Lexer, out *RuleState) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -188,7 +188,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(out *jwriter.Writer, in RuleState) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(out *jwriter.Writer, in RuleState) { out.RawByte('{') first := true _ = first @@ -241,14 +241,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v RuleState) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules1(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *RuleState) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules1(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor1(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(in *jlexer.Lexer, out *PolicyState) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(in *jlexer.Lexer, out 
*PolicyState) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -314,7 +314,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(out *jwriter.Writer, in PolicyState) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(out *jwriter.Writer, in PolicyState) { out.RawByte('{') first := true _ = first @@ -358,14 +358,14 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v PolicyState) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules2(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *PolicyState) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules2(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor2(l, v) } -func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(in *jlexer.Lexer, out *HeartbeatEvent) { +func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(in *jlexer.Lexer, out *HeartbeatEvent) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -410,7 +410,7 @@ func easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(in *jle in.Consumed() } } -func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(out *jwriter.Writer, in HeartbeatEvent) { +func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(out *jwriter.Writer, in HeartbeatEvent) { out.RawByte('{') first := true _ = first @@ -438,10 +438,10 @@ func easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v 
HeartbeatEvent) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRules3(w, v) + easyjson6151911dEncodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *HeartbeatEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRules3(l, v) + easyjson6151911dDecodeGithubComDataDogDatadogAgentPkgSecurityRulesMonitor3(l, v) } diff --git a/pkg/security/rules/rule_filters_model.go b/pkg/security/rules/rule_filters_model.go index 5a76c2db7385e..a8b1018170b5e 100644 --- a/pkg/security/rules/rule_filters_model.go +++ b/pkg/security/rules/rule_filters_model.go @@ -31,7 +31,7 @@ func (e *RuleFilterEvent) GetFieldType(field eval.Field) (reflect.Kind, error) { case "kernel.version.major", "kernel.version.minor", "kernel.version.patch", "kernel.version.abi": return reflect.Int, nil case "kernel.version.flavor", - "os", "os.id", "os.platform_id", "os.version_id": + "os", "os.id", "os.platform_id", "os.version_id", "envs": return reflect.String, nil case "os.is_amazon_linux", "os.is_cos", "os.is_debian", "os.is_oracle", "os.is_rhel", "os.is_rhel7", "os.is_rhel8", "os.is_sles", "os.is_sles12", "os.is_sles15": diff --git a/pkg/security/rules/rule_filters_model_linux.go b/pkg/security/rules/rule_filters_model_linux.go index f3fd957826bab..85ce425d73188 100644 --- a/pkg/security/rules/rule_filters_model_linux.go +++ b/pkg/security/rules/rule_filters_model_linux.go @@ -9,6 +9,7 @@ package rules import ( + "os" "runtime" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" @@ -18,21 +19,24 @@ import ( // RuleFilterEvent defines a rule filter event type RuleFilterEvent struct { *kernel.Version + origin string } // RuleFilterModel defines a filter model type RuleFilterModel struct { *kernel.Version + origin string } // NewRuleFilterModel returns a new rule filter model -func NewRuleFilterModel() 
(*RuleFilterModel, error) { +func NewRuleFilterModel(origin string) (*RuleFilterModel, error) { kv, err := kernel.NewKernelVersion() if err != nil { return nil, err } return &RuleFilterModel{ Version: kv, + origin: origin, }, nil } @@ -40,6 +44,7 @@ func NewRuleFilterModel() (*RuleFilterModel, error) { func (m *RuleFilterModel) NewEvent() eval.Event { return &RuleFilterEvent{ Version: m.Version, + origin: m.origin, } } @@ -174,6 +179,16 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva EvalFnc: func(ctx *eval.Context) bool { return ctx.Event.(*RuleFilterEvent).IsSuse15Kernel() }, Field: field, }, nil + case "envs": + return &eval.StringArrayEvaluator{ + Values: os.Environ(), + Field: field, + }, nil + case "origin": + return &eval.StringEvaluator{ + Value: m.origin, + Field: field, + }, nil } return nil, &eval.ErrFieldNotFound{Field: field} @@ -237,6 +252,10 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { return e.IsSuse12Kernel(), nil case "os.is_sles15": return e.IsSuse15Kernel(), nil + case "envs": + return os.Environ(), nil + case "origin": + return e.origin, nil } return nil, &eval.ErrFieldNotFound{Field: field} diff --git a/pkg/security/rules/rule_filters_model_other.go b/pkg/security/rules/rule_filters_model_other.go index d58cef58b2055..74939c9ef50a6 100644 --- a/pkg/security/rules/rule_filters_model_other.go +++ b/pkg/security/rules/rule_filters_model_other.go @@ -9,6 +9,7 @@ package rules import ( + "os" "runtime" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" @@ -16,20 +17,26 @@ import ( // RuleFilterEvent represents a rule filtering event type RuleFilterEvent struct { + origin string } // RuleFilterModel represents a rule fitlering model type RuleFilterModel struct { + origin string } // NewRuleFilterModel returns a new rule filtering model -func NewRuleFilterModel() (*RuleFilterModel, error) { - return &RuleFilterModel{}, nil +func NewRuleFilterModel(origin 
string) (*RuleFilterModel, error) { + return &RuleFilterModel{ + origin: origin, + }, nil } // NewEvent returns a new rule filtering event func (m *RuleFilterModel) NewEvent() eval.Event { - return &RuleFilterEvent{} + return &RuleFilterEvent{ + origin: m.origin, + } } // GetEvaluator returns a new evaluator for a rule filtering field @@ -59,6 +66,16 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva Value: false, Field: field, }, nil + + case "envs": + return &eval.StringArrayEvaluator{ + Values: os.Environ(), + }, nil + case "origin": + return &eval.StringEvaluator{ + Value: m.origin, + Field: field, + }, nil } return nil, &eval.ErrFieldNotFound{Field: field} @@ -79,6 +96,11 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { case "os.is_amazon_linux", "os.is_cos", "os.is_debian", "os.is_oracle", "os.is_rhel", "os.is_rhel7", "os.is_rhel8", "os.is_sles", "os.is_sles12", "os.is_sles15": return false, nil + + case "envs": + return os.Environ(), nil + case "origin": + return e.origin, nil } return nil, &eval.ErrFieldNotFound{Field: field} diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 53004a73f45ae..58ecd393a3d21 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -18,7 +18,7 @@ require ( golang.org/x/exp v0.0.0-20221114191408-850992195362 golang.org/x/sys v0.15.0 golang.org/x/text v0.14.0 - golang.org/x/tools v0.16.0 + golang.org/x/tools v0.16.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 47b6a9f3ebad6..220a149bab2bf 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -96,8 +96,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/security/secl/rules/policy.go b/pkg/security/secl/rules/policy.go index 95c9a50586e3d..c28718f5c738b 100644 --- a/pkg/security/secl/rules/policy.go +++ b/pkg/security/secl/rules/policy.go @@ -11,9 +11,10 @@ import ( "fmt" "io" - "github.com/DataDog/datadog-agent/pkg/security/secl/validators" "github.com/hashicorp/go-multierror" "gopkg.in/yaml.v2" + + "github.com/DataDog/datadog-agent/pkg/security/secl/validators" ) // PolicyDef represents a policy file definition @@ -25,11 +26,12 @@ type PolicyDef struct { // Policy represents a policy file which is composed of a list of rules and macros type Policy struct { - Name string - Source string - Version string - Rules []*RuleDefinition - Macros []*MacroDefinition + Name string + Source string + Version string + Rules []*RuleDefinition + Macros []*MacroDefinition + IsInternal bool } // AddMacro add a macro to the policy diff --git a/pkg/security/tests/eventmonitor_test.go b/pkg/security/tests/eventmonitor_test.go index 427f5ae333df9..47041c7d50312 100644 --- a/pkg/security/tests/eventmonitor_test.go +++ b/pkg/security/tests/eventmonitor_test.go @@ -13,6 +13,7 @@ import ( "os/exec" "sync" "testing" + "time" "github.com/avast/retry-go/v4" 
"github.com/stretchr/testify/assert" @@ -123,7 +124,7 @@ func TestEventMonitor(t *testing.T) { } return errors.New("event not received") - }, retry.Delay(200), retry.Attempts(10)) + }, retry.Delay(200*time.Millisecond), retry.Attempts(10)) assert.Nil(t, err) }) @@ -141,7 +142,7 @@ func TestEventMonitor(t *testing.T) { } return errors.New("event not received") - }, retry.Delay(200), retry.Attempts(10)) + }, retry.Delay(200*time.Millisecond), retry.Attempts(10)) assert.Nil(t, err) }) } diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index b68e6116532c3..eaa5f5fac7830 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -1122,7 +1122,7 @@ func (tm *testModule) Run(t *testing.T, name string, fnc func(t *testing.T, kind func (tm *testModule) reloadPolicies() error { log.Debugf("reload policies with cfgDir: %s", commonCfgDir) - bundledPolicyProvider := &rulesmodule.BundledPolicyProvider{} + bundledPolicyProvider := rulesmodule.NewBundledPolicyProvider(tm.eventMonitor.Probe.Config.RuntimeSecurity) policyDirProvider, err := rules.NewPoliciesDirProvider(commonCfgDir, false) if err != nil { return err diff --git a/pkg/security/tests/rule_filters_test.go b/pkg/security/tests/rule_filters_test.go index d84819039fbb0..91caa8dd8f2cc 100644 --- a/pkg/security/tests/rule_filters_test.go +++ b/pkg/security/tests/rule_filters_test.go @@ -27,7 +27,7 @@ func TestSECLRuleFilter(t *testing.T) { Code: kernel.Kernel5_9, } - m, err := rulesmodule.NewRuleFilterModel() + m, err := rulesmodule.NewRuleFilterModel("") assert.NoError(t, err) m.Version = kv seclRuleFilter := rules.NewSECLRuleFilter(m) diff --git a/pkg/serverless/daemon/routes_test.go b/pkg/serverless/daemon/routes_test.go index ca7a54ea62357..ef617475cc128 100644 --- a/pkg/serverless/daemon/routes_test.go +++ b/pkg/serverless/daemon/routes_test.go @@ -241,13 +241,12 @@ func TestStartEndInvocationSpanParenting(t *testing.T) { expPriority: 1, }, { - // 
NOTE: sns trace extraction not yet implemented name: "sns", payload: getEventFromFile("sns.json"), expInfSpans: 1, - expTraceID: 0, - expParentID: 0, - expPriority: -128, + expTraceID: 4948377316357291421, + expParentID: 6746998015037429512, + expPriority: 1, }, { name: "sns-sqs", diff --git a/pkg/serverless/invocationlifecycle/init.go b/pkg/serverless/invocationlifecycle/init.go index 00a5341650522..9121052f871b3 100644 --- a/pkg/serverless/invocationlifecycle/init.go +++ b/pkg/serverless/invocationlifecycle/init.go @@ -13,10 +13,8 @@ import ( pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" - - "github.com/aws/aws-lambda-go/events" - "github.com/DataDog/datadog-agent/pkg/serverless/trigger" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -106,7 +104,7 @@ func (lp *LifecycleProcessor) initFromDynamoDBStreamEvent(event events.DynamoDBE lp.addTag(tagFunctionTriggerEventSourceArn, trigger.ExtractDynamoDBStreamEventARN(event)) } -func (lp *LifecycleProcessor) initFromEventBridgeEvent(event inferredspan.EventBridgeEvent) { +func (lp *LifecycleProcessor) initFromEventBridgeEvent(event events.EventBridgeEvent) { lp.requestHandler.event = event lp.addTag(tagFunctionTriggerEventSource, eventBridge) lp.addTag(tagFunctionTriggerEventSourceArn, event.Source) diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go b/pkg/serverless/invocationlifecycle/lifecycle.go index 8a2290cc5b9dc..67bb7bc672175 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -12,8 +12,6 @@ import ( "strings" "time" - "github.com/aws/aws-lambda-go/events" - "github.com/DataDog/datadog-agent/pkg/aggregator" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" serverlessLog "github.com/DataDog/datadog-agent/pkg/serverless/logs" @@ -21,6 +19,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation" "github.com/DataDog/datadog-agent/pkg/serverless/trigger" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -191,7 +190,7 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails ev = event lp.initFromKinesisStreamEvent(event) case trigger.EventBridgeEvent: - var event inferredspan.EventBridgeEvent + var event events.EventBridgeEvent if err := json.Unmarshal(payloadBytes, &event); err != nil { log.Debugf("Failed to unmarshal %s event: %s", eventBridge, err) break diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index eb041158858c4..0b925f9a25be6 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/serverless/trace/inferredspan/constants.go b/pkg/serverless/trace/inferredspan/constants.go index cc6ed400ed433..d48e12c6ccc07 100644 --- a/pkg/serverless/trace/inferredspan/constants.go +++ b/pkg/serverless/trace/inferredspan/constants.go @@ -55,11 +55,3 @@ const ( // in the payload headers invocationType = "X-Amz-Invocation-Type" ) - -// EventBridgeEvent is used for unmarshalling a EventBridge event. -// AWS Go libraries do not provide this type of event for deserialization. 
-type EventBridgeEvent struct { - DetailType string `json:"detail-type"` - Source string `json:"source"` - StartTime string `json:"time"` -} diff --git a/pkg/serverless/trace/inferredspan/span_enrichment.go b/pkg/serverless/trace/inferredspan/span_enrichment.go index c4afae205f47f..c849dae23c09b 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment.go @@ -12,8 +12,8 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/aws/aws-lambda-go/events" ) // Define and initialize serviceMapping as a global variable. @@ -287,7 +287,7 @@ func (inferredSpan *InferredSpan) EnrichInferredSpanWithSQSEvent(eventPayload ev // EnrichInferredSpanWithEventBridgeEvent uses the parsed event // payload to enrich the current inferred span. It applies a // specific set of data to the span expected from an EventBridge event. 
-func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPayload EventBridgeEvent) { +func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPayload events.EventBridgeEvent) { source := eventPayload.Source serviceName := DetermineServiceName(serviceMapping, source, "lambda_eventbridge", "eventbridge") inferredSpan.IsAsync = true diff --git a/pkg/serverless/trace/inferredspan/span_enrichment_test.go b/pkg/serverless/trace/inferredspan/span_enrichment_test.go index b1203a7f61862..8379c6bcf717c 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment_test.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment_test.go @@ -12,10 +12,10 @@ import ( "testing" "time" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) const ( @@ -632,7 +632,7 @@ func TestRemapsSpecificInferredSpanServiceNamesFromS3Event(t *testing.T) { } func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) @@ -646,6 +646,7 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { assert.Equal(t, "web", span.Type) assert.Equal(t, "aws.eventbridge", span.Meta[operationName]) assert.Equal(t, "eventbridge.custom.event.sender", span.Meta[resourceNames]) + assert.Equal(t, "testdetail", span.Meta[detailType]) assert.True(t, inferredSpan.IsAsync) } @@ -663,7 +664,7 @@ func TestRemapsAllInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T) { } SetServiceMapping(newServiceMapping) // Load the original event - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = 
json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() @@ -699,7 +700,7 @@ func TestRemapsSpecificInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T } SetServiceMapping(newServiceMapping) // Load the original event - var eventBridgeEvent EventBridgeEvent + var eventBridgeEvent events.EventBridgeEvent _ = json.Unmarshal(getEventFromFile("eventbridge-custom.json"), &eventBridgeEvent) inferredSpan := mockInferredSpan() diff --git a/pkg/serverless/trace/propagation/carriers.go b/pkg/serverless/trace/propagation/carriers.go index 562202922b8e1..71b2f50ff9f5d 100644 --- a/pkg/serverless/trace/propagation/carriers.go +++ b/pkg/serverless/trace/propagation/carriers.go @@ -15,8 +15,8 @@ import ( "strconv" "strings" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) @@ -34,12 +34,25 @@ const ( var rootRegex = regexp.MustCompile("Root=1-[0-9a-fA-F]{8}-00000000[0-9a-fA-F]{16}") +var ( + errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") + errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") + errorStringNotFound = errors.New("String value not found in _datadog payload") + errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") + errorNoDDContextFound = errors.New("No Datadog trace context found") + errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") + errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") + errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") + errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") + errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") +) + // 
extractTraceContextfromAWSTraceHeader extracts trace context from the // AWSTraceHeader directly. Unlike the other carriers in this file, it should // not be passed to the tracer.Propagator, instead extracting context directly. func extractTraceContextfromAWSTraceHeader(value string) (*TraceContext, error) { if !rootRegex.MatchString(value) { - return nil, errors.New("AWSTraceHeader does not match expected regex") + return nil, errorAWSTraceHeaderMismatch } var ( startPart int @@ -86,7 +99,7 @@ func extractTraceContextfromAWSTraceHeader(value string) (*TraceContext, error) tc.SamplingPriority = sampler.PriorityAutoKeep } if tc.TraceID == 0 || tc.ParentID == 0 { - return nil, errors.New("AWSTraceHeader does not contain trace ID and parent ID") + return nil, errorAWSTraceHeaderEmpty } return tc, nil } @@ -108,7 +121,7 @@ func sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade switch attr.DataType { case "String": if attr.StringValue == nil { - return nil, errors.New("String value not found in _datadog payload") + return nil, errorStringNotFound } bytes = []byte(*attr.StringValue) case "Binary": @@ -116,7 +129,7 @@ func sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade // MESSAGE DELIVERY option bytes = attr.BinaryValue // No need to decode base64 because already decoded default: - return nil, errors.New("Unsupported DataType in _datadog payload") + return nil, errorUnsupportedDataType } var carrier tracer.TextMapCarrier @@ -126,32 +139,62 @@ func sqsMessageAttrCarrier(attr events.SQSMessageAttribute) (tracer.TextMapReade return carrier, nil } +// snsBody is used to unmarshal only required fields on events.SNSEntity +// types. +type snsBody struct { + MessageAttributes map[string]interface{} +} + // snsSqsMessageCarrier returns the tracer.TextMapReader used to extract trace // context from the body of an events.SQSMessage type. 
func snsSqsMessageCarrier(event events.SQSMessage) (tracer.TextMapReader, error) { - var body struct { - MessageAttributes map[string]struct { - Type string - Value string - } - } + var body snsBody err := json.Unmarshal([]byte(event.Body), &body) if err != nil { return nil, fmt.Errorf("Error unmarshaling message body: %w", err) } - msgAttrs, ok := body.MessageAttributes[datadogSQSHeader] + return snsEntityCarrier(events.SNSEntity{ + MessageAttributes: body.MessageAttributes, + }) +} + +// snsEntityCarrier returns the tracer.TextMapReader used to extract trace +// context from the attributes of an events.SNSEntity type. +func snsEntityCarrier(event events.SNSEntity) (tracer.TextMapReader, error) { + msgAttrs, ok := event.MessageAttributes[datadogSQSHeader] if !ok { - return nil, errors.New("No Datadog trace context found") + return nil, errorNoDDContextFound } - if msgAttrs.Type != "Binary" { - return nil, errors.New("Unsupported DataType in _datadog payload") + mapAttrs, ok := msgAttrs.(map[string]interface{}) + if !ok { + return nil, errorUnsupportedPayloadType } - attr, err := base64.StdEncoding.DecodeString(string(msgAttrs.Value)) - if err != nil { - return nil, fmt.Errorf("Error decoding binary: %w", err) + + typ, ok := mapAttrs["Type"].(string) + if !ok { + return nil, errorUnsupportedTypeType + } + val, ok := mapAttrs["Value"].(string) + if !ok { + return nil, errorUnsupportedValueType } + + var bytes []byte + var err error + switch typ { + case "Binary": + bytes, err = base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, fmt.Errorf("Error decoding binary: %w", err) + } + case "String": + bytes = []byte(val) + default: + return nil, errorUnsupportedTypeValue + } + var carrier tracer.TextMapCarrier - if err = json.Unmarshal(attr, &carrier); err != nil { + if err = json.Unmarshal(bytes, &carrier); err != nil { return nil, fmt.Errorf("Error unmarshaling the decoded binary: %w", err) } return carrier, nil @@ -166,7 +209,7 @@ type 
invocationPayload struct { func rawPayloadCarrier(rawPayload []byte) (tracer.TextMapReader, error) { var payload invocationPayload if err := json.Unmarshal(rawPayload, &payload); err != nil { - return nil, errors.New("Could not unmarshal the invocation event payload") + return nil, errorCouldNotUnmarshal } return payload.Headers, nil } diff --git a/pkg/serverless/trace/propagation/carriers_test.go b/pkg/serverless/trace/propagation/carriers_test.go index 102bb55f7c3a1..102646c9be40a 100644 --- a/pkg/serverless/trace/propagation/carriers_test.go +++ b/pkg/serverless/trace/propagation/carriers_test.go @@ -10,8 +10,8 @@ import ( "errors" "testing" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" @@ -169,7 +169,7 @@ func TestSnsSqsMessageCarrier(t *testing.T) { }`, }, expMap: nil, - expErr: errors.New("Error unmarshaling message body: json: cannot unmarshal string into Go struct field .MessageAttributes of type map[string]struct { Type string; Value string }"), + expErr: errors.New("Error unmarshaling message body: json: cannot unmarshal string into Go struct field snsBody.MessageAttributes of type map[string]interface {}"), }, { name: "non-binary-type", @@ -177,14 +177,14 @@ func TestSnsSqsMessageCarrier(t *testing.T) { Body: `{ "MessageAttributes": { "_datadog": { - "Type": "String", + "Type": "Purple", "Value": "Value" } } }`, }, expMap: nil, - expErr: errors.New("Unsupported DataType in _datadog payload"), + expErr: errors.New("Unsupported Type in _datadog payload"), }, { name: "cannot-decode", @@ -243,6 +243,135 @@ func TestSnsSqsMessageCarrier(t *testing.T) { } } +func TestSnsEntityCarrier(t *testing.T) { + testcases := []struct { + name string + event events.SNSEntity + expMap map[string]string + expErr error + }{ + { + name: 
"no-msg-attrs", + event: events.SNSEntity{}, + expMap: nil, + expErr: errors.New("No Datadog trace context found"), + }, + { + name: "wrong-type-msg-attrs", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": 12345, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported type for _datadog payload"), + }, + { + name: "wrong-type-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": 12345, + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported type in _datadog payload"), + }, + { + name: "wrong-value-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": 12345, + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported value type in _datadog payload"), + }, + { + name: "cannot-decode", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Error decoding binary: illegal base64 data at input byte 4"), + }, + { + name: "unknown-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Purple", + "Value": "Value", + }, + }, + }, + expMap: nil, + expErr: errors.New("Unsupported Type in _datadog payload"), + }, + { + name: "empty-string-encoded", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": base64.StdEncoding.EncodeToString([]byte(``)), + }, + }, + }, + expMap: nil, + expErr: errors.New("Error unmarshaling the decoded binary: unexpected end of JSON input"), + }, + { + name: "binary-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": 
base64.StdEncoding.EncodeToString([]byte(headersAll)), + }, + }, + }, + expMap: headersMapAll, + expErr: nil, + }, + { + name: "string-type", + event: events.SNSEntity{ + MessageAttributes: map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "String", + "Value": headersAll, + }, + }, + }, + expMap: headersMapAll, + expErr: nil, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tm, err := snsEntityCarrier(tc.event) + t.Logf("snsEntityCarrier returned TextMapReader=%#v error=%#v", tm, err) + assert.Equal(t, tc.expErr != nil, err != nil) + if tc.expErr != nil && err != nil { + assert.Equal(t, tc.expErr.Error(), err.Error()) + } + assert.Equal(t, tc.expMap, getMapFromCarrier(tm)) + }) + } +} + func TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { ctx := func(trace, parent, priority uint64) *TraceContext { return &TraceContext{ @@ -444,6 +573,24 @@ func TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { expTc: nil, expNoErr: false, }, + { + name: "bad trace id", + value: "Root=1-00000000-000000000000000000000001purple;Parent=0000000000000002;Sampled=1", + expTc: nil, + expNoErr: false, + }, + { + name: "bad parent id", + value: "Root=1-00000000-000000000000000000000001;Parent=0000000000000002purple;Sampled=1", + expTc: nil, + expNoErr: false, + }, + { + name: "zero value trace and parent id", + value: "Root=1-00000000-000000000000000000000000;Parent=0000000000000000;Sampled=1", + expTc: nil, + expNoErr: false, + }, } for _, tc := range testcases { diff --git a/pkg/serverless/trace/propagation/extractor.go b/pkg/serverless/trace/propagation/extractor.go index 7c46920653aea..eb745f4f49175 100644 --- a/pkg/serverless/trace/propagation/extractor.go +++ b/pkg/serverless/trace/propagation/extractor.go @@ -11,9 +11,9 @@ import ( "net/http" "strconv" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" 
"github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/aws/aws-lambda-go/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) @@ -32,6 +32,7 @@ var ( errorUnsupportedExtractionType = errors.New("Unsupported event type for trace context extraction") errorNoContextFound = errors.New("No trace context found") errorNoSQSRecordFound = errors.New("No sqs message records found for trace context extraction") + errorNoSNSRecordFound = errors.New("No sns message records found for trace context extraction") errorNoTraceIDFound = errors.New("No trace ID found") errorNoParentIDFound = errors.New("No parent ID found") ) @@ -91,6 +92,14 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { } } carrier, err = sqsMessageCarrier(ev) + case events.SNSEvent: + // look for context in just the first message + if len(ev.Records) > 0 { + return e.extract(ev.Records[0].SNS) + } + return nil, errorNoSNSRecordFound + case events.SNSEntity: + carrier, err = snsEntityCarrier(ev) case events.APIGatewayProxyRequest: carrier, err = headersCarrier(ev.Headers) case events.APIGatewayV2HTTPRequest: diff --git a/pkg/serverless/trace/propagation/extractor_test.go b/pkg/serverless/trace/propagation/extractor_test.go index 7e224e60a5173..21a051dea5a95 100644 --- a/pkg/serverless/trace/propagation/extractor_test.go +++ b/pkg/serverless/trace/propagation/extractor_test.go @@ -10,10 +10,11 @@ import ( "encoding/json" "errors" "net/http" + "os" "testing" + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/trace/sampler" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" @@ -150,6 +151,28 @@ var ( } return e } + + eventSnsEntity = func(binHdrs, strHdrs string) events.SNSEntity { + e := events.SNSEntity{} + if len(binHdrs) > 0 && len(strHdrs) == 0 { + e.MessageAttributes = 
map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "Binary", + "Value": base64.StdEncoding.EncodeToString([]byte(binHdrs)), + }, + } + } else if len(binHdrs) == 0 && len(strHdrs) > 0 { + e.MessageAttributes = map[string]interface{}{ + "_datadog": map[string]interface{}{ + "Type": "String", + "Value": strHdrs, + }, + } + } else if len(binHdrs) > 0 && len(strHdrs) > 0 { + panic("expecting one of binHdrs or strHdrs, not both") + } + return e + } ) func TestNilPropagator(t *testing.T) { @@ -193,6 +216,16 @@ func TestExtractorExtract(t *testing.T) { }, // events.SQSEvent + { + name: "sqs-event-no-records", + events: []interface{}{ + events.SQSEvent{ + Records: []events.SQSMessage{}, + }, + }, + expCtx: nil, + expNoErr: false, + }, { name: "sqs-event-uses-first-record", events: []interface{}{ @@ -288,6 +321,92 @@ func TestExtractorExtract(t *testing.T) { expNoErr: true, }, + // events.SNSEvent + { + name: "sns-event-no-records", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{}, + }, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "sns-event-uses-first-record", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + // Uses the first message only + {SNS: eventSnsEntity(headersDD, headersNone)}, + {SNS: eventSnsEntity(headersW3C, headersNone)}, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "sqs-event-uses-first-record-empty", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + // Uses the first message only + {SNS: eventSnsEntity(headersNone, headersNone)}, + {SNS: eventSnsEntity(headersW3C, headersNone)}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + + // events.SNSEntity + { + name: "unable-to-get-carrier", + events: []interface{}{ + events.SNSEntity{}, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "extraction-error", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: 
eventSnsEntity(headersNone, headersNone)}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "extract-binary", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: eventSnsEntity(headersAll, headersNone)}, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + { + name: "extract-string", + events: []interface{}{ + events.SNSEvent{ + Records: []events.SNSEventRecord{ + {SNS: eventSnsEntity(headersNone, headersAll)}, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + // events.APIGatewayProxyRequest: { name: "APIGatewayProxyRequest", @@ -404,6 +523,112 @@ func TestExtractorExtract(t *testing.T) { } } +func TestExtractorExtractPayloadJson(t *testing.T) { + testcases := []struct { + filename string + eventTyp string + expCtx *TraceContext + }{ + { + filename: "api-gateway.json", + eventTyp: "APIGatewayProxyRequest", + expCtx: &TraceContext{ + TraceID: 12345, + ParentID: 67890, + SamplingPriority: 2, + }, + }, + { + filename: "sns-batch.json", + eventTyp: "SNSEvent", + expCtx: &TraceContext{ + TraceID: 4948377316357291421, + ParentID: 6746998015037429512, + SamplingPriority: 1, + }, + }, + { + filename: "sns.json", + eventTyp: "SNSEvent", + expCtx: &TraceContext{ + TraceID: 4948377316357291421, + ParentID: 6746998015037429512, + SamplingPriority: 1, + }, + }, + { + filename: "snssqs.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 1728904347387697031, + ParentID: 353722510835624345, + SamplingPriority: 1, + }, + }, + { + filename: "sqs-aws-header.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 12297829382473034410, + ParentID: 13527612320720337851, + SamplingPriority: 1, + }, + }, + { + filename: "sqs-batch.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 2684756524522091840, + ParentID: 7431398482019833808, + SamplingPriority: 1, + }, + }, + { + filename: "sqs.json", + eventTyp: "SQSEvent", + expCtx: &TraceContext{ + TraceID: 
2684756524522091840, + ParentID: 7431398482019833808, + SamplingPriority: 1, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.filename, func(t *testing.T) { + body, err := os.ReadFile("../testdata/event_samples/" + tc.filename) + assert.NoError(t, err) + + var ev interface{} + switch tc.eventTyp { + case "APIGatewayProxyRequest": + var event events.APIGatewayProxyRequest + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + case "SNSEvent": + var event events.SNSEvent + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + case "SQSEvent": + var event events.SQSEvent + err = json.Unmarshal(body, &event) + assert.NoError(t, err) + ev = event + default: + t.Fatalf("bad type: %s", tc.eventTyp) + } + + extractor := Extractor{} + ctx, err := extractor.Extract(ev) + t.Logf("Extract returned TraceContext=%#v error=%#v", ctx, err) + assert.NoError(t, err) + assert.Equal(t, tc.expCtx, ctx) + }) + } +} + func TestPropagationStyle(t *testing.T) { testcases := []struct { name string @@ -430,9 +655,6 @@ func TestPropagationStyle(t *testing.T) { expTraceID: w3c.trace.asUint, }, { - // XXX: This is surprising - // The go tracer is designed to always place the tracecontext propagator first - // see https://github.com/DataDog/dd-trace-go/blob/6a938b3b4054ce036cc60147ab42a86f743fcdd5/ddtrace/tracer/textmap.go#L231 name: "datadog,tracecontext-type-headers-all", propType: "datadog,tracecontext", hdrs: headersAll, diff --git a/pkg/serverless/trace/testdata/install.json b/pkg/serverless/trace/testdata/install.json new file mode 100644 index 0000000000000..55fa36837a999 --- /dev/null +++ b/pkg/serverless/trace/testdata/install.json @@ -0,0 +1 @@ +{"install_id":"000001f5-952a-21ee-9801-0e0c566f5cb7","install_type":"manual","install_time":1701972019} \ No newline at end of file diff --git a/pkg/serverless/trigger/events/events.go b/pkg/serverless/trigger/events/events.go new file mode 100644 index 0000000000000..2bf48358779c4 --- 
/dev/null +++ b/pkg/serverless/trigger/events/events.go @@ -0,0 +1,336 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2022-present Datadog, Inc. + +// Package events provides a series of drop in replacements for +// "github.com/aws/aws-lambda-go/events". Using these types for json +// unmarshalling event payloads provides huge reduction in processing time. +// This means fewer map/slice allocations since only the fields which we will +// use will be unmarshalled. +package events + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "time" + + "github.com/aws/aws-lambda-go/events" +) + +// APIGatewayProxyRequest mirrors events.APIGatewayProxyRequest type, removing +// unused fields. +type APIGatewayProxyRequest struct { + Resource string + Path string + HTTPMethod string + Headers map[string]string + RequestContext APIGatewayProxyRequestContext +} + +// APIGatewayProxyRequestContext mirrors events.APIGatewayProxyRequestContext +// type, removing unused fields. +type APIGatewayProxyRequestContext struct { + Stage string + DomainName string + RequestID string + Path string + HTTPMethod string + RequestTimeEpoch int64 + APIID string +} + +// APIGatewayV2HTTPRequest mirrors events.APIGatewayV2HTTPRequest type, +// removing unused fields. +type APIGatewayV2HTTPRequest struct { + RouteKey string + Headers map[string]string + RequestContext APIGatewayV2HTTPRequestContext +} + +// APIGatewayV2HTTPRequestContext mirrors events.APIGatewayV2HTTPRequestContext +// type, removing unused fields. 
+type APIGatewayV2HTTPRequestContext struct { + Stage string + RequestID string + APIID string + DomainName string + TimeEpoch int64 + HTTP APIGatewayV2HTTPRequestContextHTTPDescription +} + +// APIGatewayV2HTTPRequestContextHTTPDescription mirrors +// events.APIGatewayV2HTTPRequestContextHTTPDescription type, removing unused +// fields. +type APIGatewayV2HTTPRequestContextHTTPDescription struct { + Method string + Path string + Protocol string + SourceIP string + UserAgent string +} + +// APIGatewayWebsocketProxyRequest mirrors +// events.APIGatewayWebsocketProxyRequest type, removing unused fields. +type APIGatewayWebsocketProxyRequest struct { + Headers map[string]string + RequestContext APIGatewayWebsocketProxyRequestContext +} + +// APIGatewayWebsocketProxyRequestContext mirrors +// events.APIGatewayWebsocketProxyRequestContext type, removing unused fields. +type APIGatewayWebsocketProxyRequestContext struct { + Stage string + RequestID string + APIID string + ConnectionID string + DomainName string + EventType string + MessageDirection string + RequestTimeEpoch int64 + RouteKey string +} + +// APIGatewayCustomAuthorizerRequest mirrors +// events.APIGatewayCustomAuthorizerRequest type, removing unused fields. +type APIGatewayCustomAuthorizerRequest struct { + Type string + AuthorizationToken string + MethodArn string +} + +// APIGatewayCustomAuthorizerRequestTypeRequest mirrors +// events.APIGatewayCustomAuthorizerRequestTypeRequest type, removing unused +// fields. +type APIGatewayCustomAuthorizerRequestTypeRequest struct { + MethodArn string + Resource string + HTTPMethod string + Headers map[string]string + RequestContext APIGatewayCustomAuthorizerRequestTypeRequestContext +} + +// APIGatewayCustomAuthorizerRequestTypeRequestContext mirrors +// events.APIGatewayCustomAuthorizerRequestTypeRequestContext type, removing +// unused fields. 
+type APIGatewayCustomAuthorizerRequestTypeRequestContext struct { + Path string +} + +// ALBTargetGroupRequest mirrors events.ALBTargetGroupRequest type, removing +// unused fields. +type ALBTargetGroupRequest struct { + HTTPMethod string + Path string + Headers map[string]string + RequestContext ALBTargetGroupRequestContext +} + +// ALBTargetGroupRequestContext mirrors events.ALBTargetGroupRequestContext +// type, removing unused fields. +type ALBTargetGroupRequestContext struct { + ELB ELBContext +} + +// ELBContext mirrors events.ELBContext type, removing unused fields. +type ELBContext struct { + TargetGroupArn string +} + +// CloudWatchEvent mirrors events.CloudWatchEvent type, removing unused fields. +type CloudWatchEvent struct { + Resources []string +} + +// CloudwatchLogsEvent mirrors events.CloudwatchLogsEvent type, removing unused +// fields. +type CloudwatchLogsEvent struct { + AWSLogs CloudwatchLogsRawData +} + +// CloudwatchLogsRawData mirrors events.CloudwatchLogsRawData type, removing +// unused fields. +type CloudwatchLogsRawData struct { + Data string +} + +// Parse returns a struct representing a usable CloudwatchLogs event +func (c CloudwatchLogsRawData) Parse() (d CloudwatchLogsData, err error) { + data, err := base64.StdEncoding.DecodeString(c.Data) + if err != nil { + return + } + + zr, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return + } + defer zr.Close() + + dec := json.NewDecoder(zr) + err = dec.Decode(&d) + + return +} + +// CloudwatchLogsData mirrors events.CloudwatchLogsData type, removing unused +// fields. +type CloudwatchLogsData struct { + LogGroup string +} + +// DynamoDBEvent mirrors events.DynamoDBEvent type, removing unused fields. +type DynamoDBEvent struct { + Records []DynamoDBEventRecord +} + +// DynamoDBEventRecord mirrors events.DynamoDBEventRecord type, removing unused +// fields. 
+type DynamoDBEventRecord struct { + Change DynamoDBStreamRecord `json:"dynamodb"` + EventID string + EventName string + EventVersion string + EventSourceArn string +} + +// DynamoDBStreamRecord mirrors events.DynamoDBStreamRecord type, removing +// unused fields. +type DynamoDBStreamRecord struct { + ApproximateCreationDateTime events.SecondsEpochTime + SizeBytes int64 + StreamViewType string +} + +// KinesisEvent mirrors events.KinesisEvent type, removing unused fields. +type KinesisEvent struct { + Records []KinesisEventRecord +} + +// KinesisEventRecord mirrors events.KinesisEventRecord type, removing unused +// fields. +type KinesisEventRecord struct { + EventID string + EventName string + EventSourceArn string + EventVersion string + Kinesis KinesisRecord +} + +// KinesisRecord mirrors events.KinesisRecord type, removing unused fields. +type KinesisRecord struct { + ApproximateArrivalTimestamp events.SecondsEpochTime + PartitionKey string +} + +// EventBridgeEvent is used for unmarshalling a EventBridge event. AWS Go +// libraries do not provide this type of event for deserialization. +type EventBridgeEvent struct { + DetailType string `json:"detail-type"` + Source string `json:"source"` + StartTime string `json:"time"` +} + +// S3Event mirrors events.S3Event type, removing unused fields. +type S3Event struct { + Records []S3EventRecord +} + +// S3EventRecord mirrors events.S3EventRecord type, removing unused fields. +type S3EventRecord struct { + EventSource string + EventTime time.Time + EventName string + S3 S3Entity +} + +// S3Entity mirrors events.S3Entity type, removing unused fields. +type S3Entity struct { + Bucket S3Bucket + Object S3Object +} + +// S3Bucket mirrors events.S3Bucket type, removing unused fields. +type S3Bucket struct { + Name string + Arn string +} + +// S3Object mirrors events.S3Object type, removing unused fields. +type S3Object struct { + Key string + Size int64 + ETag string +} + +// SNSEvent mirrors events.SNSEvent type, removing unused fields. 
+type SNSEvent struct { + Records []SNSEventRecord +} + +// SNSEventRecord mirrors events.SNSEventRecord type, removing unused fields. +type SNSEventRecord struct { + SNS SNSEntity +} + +// SNSEntity mirrors events.SNSEntity type, removing unused fields. +type SNSEntity struct { + MessageID string + Type string + TopicArn string + MessageAttributes map[string]interface{} + Timestamp time.Time + Subject string +} + +// SQSEvent mirrors events.SQSEvent type, removing unused fields. +type SQSEvent struct { + Records []SQSMessage +} + +// SQSMessage mirrors events.SQSMessage type, removing unused fields. +type SQSMessage struct { + ReceiptHandle string + Body string + Attributes map[string]string + MessageAttributes map[string]SQSMessageAttribute + EventSourceARN string +} + +// SQSMessageAttribute mirrors events.SQSMessageAttribute type, removing unused +// fields. +type SQSMessageAttribute struct { + StringValue *string + BinaryValue []byte + DataType string +} + +// LambdaFunctionURLRequest mirrors events.LambdaFunctionURLRequest type, +// removing unused fields. +type LambdaFunctionURLRequest struct { + Headers map[string]string + RequestContext LambdaFunctionURLRequestContext +} + +// LambdaFunctionURLRequestContext mirrors +// events.LambdaFunctionURLRequestContext type, removing unused fields. +type LambdaFunctionURLRequestContext struct { + RequestID string + APIID string + DomainName string + TimeEpoch int64 + HTTP LambdaFunctionURLRequestContextHTTPDescription +} + +// LambdaFunctionURLRequestContextHTTPDescription mirrors +// events.LambdaFunctionURLRequestContextHTTPDescription type, removing unused +// fields. 
+type LambdaFunctionURLRequestContextHTTPDescription struct { + Method string + Path string + Protocol string + SourceIP string + UserAgent string +} diff --git a/pkg/serverless/trigger/extractor.go b/pkg/serverless/trigger/extractor.go index 7ba2d90aa54af..f1013e2bbe5ef 100644 --- a/pkg/serverless/trigger/extractor.go +++ b/pkg/serverless/trigger/extractor.go @@ -11,8 +11,9 @@ import ( "strconv" "strings" - "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-sdk-go-v2/aws/arn" + + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) // GetAWSPartitionByRegion parses an AWS region and returns an AWS partition diff --git a/pkg/serverless/trigger/extractor_test.go b/pkg/serverless/trigger/extractor_test.go index 757d1c02d98eb..34cb6f4b3c816 100644 --- a/pkg/serverless/trigger/extractor_test.go +++ b/pkg/serverless/trigger/extractor_test.go @@ -11,8 +11,9 @@ import ( "encoding/base64" "testing" - "github.com/aws/aws-lambda-go/events" "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" ) func TestGetAWSPartitionByRegion(t *testing.T) { diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 143526229898a..b36b200464112 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -42,7 +42,7 @@ require ( ) require ( - github.com/DataDog/go-sqllexer v0.0.8 // indirect + github.com/DataDog/go-sqllexer v0.0.9 // indirect github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index fda8ab4ab4edd..68748230057c7 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/go-sqllexer v0.0.8 h1:vfC8R9PhmJfeOKcFYAX9UOd890A3wu3KrjU9Kr7nM0E= 
-github.com/DataDog/go-sqllexer v0.0.8/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= +github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw= +github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY= github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I= github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.9.0 h1:Y+CllChr2yfE2RLd+c5hvK56DDGVEdDrhAl1OyzuuwU= diff --git a/pkg/util/containers/metrics/system/containerid_linux.go b/pkg/util/containers/metrics/system/containerid_linux.go index fde087fdbae88..b7c6f3b90a075 100644 --- a/pkg/util/containers/metrics/system/containerid_linux.go +++ b/pkg/util/containers/metrics/system/containerid_linux.go @@ -17,7 +17,7 @@ import ( const ( selfMountInfoPath = "/proc/self/mountinfo" containerdSandboxPrefix = "sandboxes" - cIDRegexp = `([^\s/]+)/(` + cgroups.ContainerRegexpStr + `)/[\S]*hostname` + cIDRegexp = `.*/([^\s/]+)/(` + cgroups.ContainerRegexpStr + `)/[\S]*hostname` ) var cIDMountInfoRegexp = regexp.MustCompile(cIDRegexp) diff --git a/pkg/util/containers/metrics/system/containerid_linux_test.go b/pkg/util/containers/metrics/system/containerid_linux_test.go index 45dc639ce98f8..e4647318c9b14 100644 --- a/pkg/util/containers/metrics/system/containerid_linux_test.go +++ b/pkg/util/containers/metrics/system/containerid_linux_test.go @@ -32,6 +32,11 @@ func TestParseMountinfo(t *testing.T) { filePath: "./testdata/mountinfo_k8s_agent", wantContainerID: "fc7038bc73a8d3850c66ddbfb0b2901afa378bfcbb942cc384b051767e4ac6b0", }, + { + name: "Kind (containerd in docker)", + filePath: "./testdata/mountinfo_kind", + wantContainerID: "", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/util/containers/metrics/system/testdata/mountinfo_kind 
b/pkg/util/containers/metrics/system/testdata/mountinfo_kind new file mode 100644 index 0000000000000..cfa0ebd86547c --- /dev/null +++ b/pkg/util/containers/metrics/system/testdata/mountinfo_kind @@ -0,0 +1 @@ +1258 1249 254:1 /docker/volumes/0919c2d87ec8ba99f3c85fdada5fe26eca73b2fce73a5974d6030f30bf91cbaf/_data/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/ca30bb64884083e29b1dc08a1081dd2df123f13f045dadb64dc346e56c0b6871/hostname /etc/hostname rw,relatime - ext4 /dev/vda1 rw,discard \ No newline at end of file diff --git a/pkg/util/hostname/validate/go.mod b/pkg/util/hostname/validate/go.mod new file mode 100644 index 0000000000000..9fdcfaeecfefe --- /dev/null +++ b/pkg/util/hostname/validate/go.mod @@ -0,0 +1,22 @@ +module github.com/DataDog/datadog-agent/pkg/util/hostname/validate + +go 1.20 + +replace ( + github.com/DataDog/datadog-agent/pkg/util/log => ../../log/ + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../scrubber/ +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/log v0.0.0-00010101000000-000000000000 + github.com/stretchr/testify v1.8.4 +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.50.0-rc.4 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/util/hostname/validate/go.sum b/pkg/util/hostname/validate/go.sum new file mode 100644 index 0000000000000..17ed8eaff9cbd --- /dev/null +++ b/pkg/util/hostname/validate/go.sum @@ -0,0 +1,14 @@ +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/util/kubernetes/apiserver/apiserver.go b/pkg/util/kubernetes/apiserver/apiserver.go index fed55cee4a8c4..49df425c75cc8 100644 --- a/pkg/util/kubernetes/apiserver/apiserver.go +++ b/pkg/util/kubernetes/apiserver/apiserver.go @@ -21,16 +21,14 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/pkg/util/retry" v1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpai "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" 
"k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" @@ -58,12 +56,48 @@ const ( // APIClient provides authenticated access to the // apiserver endpoints. Use the shared instance via GetApiClient. type APIClient struct { + // + // Normal (low timeout) clients + // + + // Cl holds the main kubernetes client + Cl kubernetes.Interface + + // DynamicCl holds a dynamic kubernetes client + DynamicCl dynamic.Interface + + // + // Informer clients (high or no timeout, use for Informers/Watch calls) + // + + // InformerCl holds the main kubernetes client with long TO + InformerCl kubernetes.Interface + + // DynamicCl holds a dynamic kubernetes client with long TO + DynamicInformerCl dynamic.Interface + + // CRDInformerClient holds the extension kubernetes client with long TO + CRDInformerClient clientset.Interface + + // APISInformerClient holds the APIService kubernetes client with long TO + APISInformerClient apiregistrationclient.ApiregistrationV1Interface + + // VPAInformerClient holds kubernetes VerticalPodAutoscalers client with long TO + VPAInformerClient vpa.Interface + + // + // Informer factories (based on informers client) + // Factories are not started by default. + // Factories can be started multiple times safely. + // Factories CANNOT be stopped safely. It means that you can only use these informers + // if informer lifetime == agent lifetime (e.g. not in CLC and not if leader-based). + // + // Use the Informer* clients above to create your own factories if you need to. + // + // InformerFactory gives access to informers. InformerFactory informers.SharedInformerFactory - // UnassignedPodInformerFactory gives access to filtered informers - UnassignedPodInformerFactory informers.SharedInformerFactory - // CertificateSecretInformerFactory gives access to filtered informers // This informer can be used by the Admission Controller to only watch the secret object // that contains the webhook certificate. 
@@ -74,50 +108,27 @@ type APIClient struct { // the corresponding MutatingWebhookConfiguration object. WebhookConfigInformerFactory informers.SharedInformerFactory - // WPAClient gives access to WPA API - WPAClient dynamic.Interface - // WPAInformerFactory gives access to informers for Watermark Pod Autoscalers. - WPAInformerFactory dynamicinformer.DynamicSharedInformerFactory - - // DDClient gives access to all datadoghq/ custom types - DDClient dynamic.Interface - // DynamicInformerFactory gives access to dynamic informers in example for all datadoghq/ custom types + // DynamicInformerFactory gives access to dynamic informers DynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory - // CRDInformerFactory gives access to informers for all crds - CRDInformerFactory externalversions.SharedInformerFactory + // + // Internal + // // initRetry used to setup the APIClient initRetry retry.Retrier - // Cl holds the main kubernetes client - Cl kubernetes.Interface - - // CRDClient holds the extension kubernetes client - CRDClient clientset.Interface - - // APISClient holds the APIService kubernetes client - APISClient apiregistrationclient.ApiregistrationV1Interface - - // DynamicCl holds a dynamic kubernetes client - DynamicCl dynamic.Interface - - // DiscoveryCl holds kubernetes discovery client - DiscoveryCl discovery.DiscoveryInterface - - // VPAClient holds kubernetes VerticalPodAutoscalers client - VPAClient vpa.Interface - - // VPAInformerFactory - VPAInformerFactory vpai.SharedInformerFactory - - // timeoutSeconds defines the kubernetes client timeout - timeoutSeconds int64 + // Client and informer timeout + defaultClientTimeout time.Duration + defaultInformerTimeout time.Duration + defaultInformerResyncPeriod time.Duration } func initAPIClient() { globalAPIClient = &APIClient{ - timeoutSeconds: config.Datadog.GetInt64("kubernetes_apiserver_client_timeout"), + defaultClientTimeout: 
time.Duration(config.Datadog.GetInt64("kubernetes_apiserver_client_timeout")) * time.Second, + defaultInformerTimeout: time.Duration(config.Datadog.GetInt64("kubernetes_apiserver_informer_client_timeout")) * time.Second, + defaultInformerResyncPeriod: time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) * time.Second, } globalAPIClient.initRetry.SetupRetrier(&retry.Config{ //nolint:errcheck Name: "apiserver", @@ -199,13 +210,14 @@ func getClientConfig(timeout time.Duration) (*rest.Config, error) { clientConfig.Timeout = timeout clientConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper { - return NewCustomRoundTripper(rt) + return NewCustomRoundTripper(rt, timeout) }) return clientConfig, nil } // GetKubeClient returns a kubernetes API server client +// You should not use this function except if you need to create a *NEW* Client. func GetKubeClient(timeout time.Duration) (kubernetes.Interface, error) { // TODO: Remove custom warning logger when we remove usage of ComponentStatus rest.SetDefaultWarningHandler(CustomWarningLogger{}) @@ -243,15 +255,6 @@ func getAPISClient(timeout time.Duration) (*apiregistrationclient.Apiregistratio return apiregistrationclient.NewForConfig(clientConfig) } -func getKubeDiscoveryClient(timeout time.Duration) (discovery.DiscoveryInterface, error) { - clientConfig, err := getClientConfig(timeout) - if err != nil { - return nil, err - } - - return discovery.NewDiscoveryClientForConfig(clientConfig) -} - func getKubeVPAClient(timeout time.Duration) (vpa.Interface, error) { clientConfig, err := getClientConfig(timeout) if err != nil { @@ -261,149 +264,72 @@ func getKubeVPAClient(timeout time.Duration) (vpa.Interface, error) { return vpa.NewForConfig(clientConfig) } -// VPAInformerFactory vpai.SharedInformerFactory -func getVPAInformerFactory(client vpa.Interface) (vpai.SharedInformerFactory, error) { - // default to 300s - resyncPeriodSeconds := 
time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - return vpai.NewSharedInformerFactory(client, resyncPeriodSeconds*time.Second), nil -} - -func getWPAInformerFactory() (dynamicinformer.DynamicSharedInformerFactory, error) { - // default to 300s - resyncPeriodSeconds := time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - client, err := getKubeDynamicClient(0) // No timeout for the Informers, to allow long watch. - if err != nil { - log.Infof("Could not get apiserver client: %v", err) - return nil, err - } - return dynamicinformer.NewDynamicSharedInformerFactory(client, resyncPeriodSeconds*time.Second), nil -} - -func getDDClient(timeout time.Duration) (dynamic.Interface, error) { - clientConfig, err := getClientConfig(timeout) - if err != nil { - return nil, err +// GetInformerWithOptions returns +func (c *APIClient) GetInformerWithOptions(resyncPeriod *time.Duration, options ...informers.SharedInformerOption) informers.SharedInformerFactory { + if resyncPeriod == nil { + resyncPeriod = &c.defaultInformerResyncPeriod } - return dynamic.NewForConfig(clientConfig) + return informers.NewSharedInformerFactoryWithOptions(c.InformerCl, *resyncPeriod, options...) } -func getDDInformerFactory() (dynamicinformer.DynamicSharedInformerFactory, error) { - // default to 300s - resyncPeriodSeconds := time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - client, err := getKubeDynamicClient(0) // No timeout for the Informers, to allow long watch. - if err != nil { - log.Infof("Could not get apiserver client: %v", err) - return nil, err - } - return dynamicinformer.NewDynamicSharedInformerFactory(client, resyncPeriodSeconds*time.Second), nil -} - -func getInformerFactory() (informers.SharedInformerFactory, error) { - resyncPeriodSeconds := time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - client, err := GetKubeClient(0) // No timeout for the Informers, to allow long watch. 
- if err != nil { - log.Errorf("Could not get apiserver client: %v", err) - return nil, err - } - return informers.NewSharedInformerFactory(client, resyncPeriodSeconds*time.Second), nil -} +func (c *APIClient) connect() error { + var err error -func getCRDInformerFactory() (externalversions.SharedInformerFactory, error) { - resyncPeriodSeconds := time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - client, err := getCRDClient(0) // No timeout for the Informers, to allow long watch. + // Clients + c.Cl, err = GetKubeClient(c.defaultClientTimeout) if err != nil { - log.Errorf("Could not get apiserver client: %v", err) - return nil, err + log.Infof("Could not get apiserver client: %v", err) + return err } - return externalversions.NewSharedInformerFactory(client, resyncPeriodSeconds*time.Second), nil -} -func getInformerFactoryWithOption(options ...informers.SharedInformerOption) (informers.SharedInformerFactory, error) { - resyncPeriodSeconds := time.Duration(config.Datadog.GetInt64("kubernetes_informers_resync_period")) - client, err := GetKubeClient(0) // No timeout for the Informers, to allow long watch. 
+ c.DynamicCl, err = getKubeDynamicClient(c.defaultClientTimeout) if err != nil { - log.Errorf("Could not get apiserver client: %v", err) - return nil, err + log.Infof("Could not get apiserver dynamic client: %v", err) + return err } - return informers.NewSharedInformerFactoryWithOptions(client, resyncPeriodSeconds*time.Second, options...), nil -} -func (c *APIClient) connect() error { - var err error - c.Cl, err = GetKubeClient(time.Duration(c.timeoutSeconds) * time.Second) + // Informer clients + c.InformerCl, err = GetKubeClient(c.defaultInformerTimeout) if err != nil { log.Infof("Could not get apiserver client: %v", err) return err } - c.DiscoveryCl, err = getKubeDiscoveryClient(time.Duration(c.timeoutSeconds) * time.Second) + c.DynamicInformerCl, err = getKubeDynamicClient(c.defaultInformerTimeout) if err != nil { - log.Infof("Could not get apiserver discovery client: %v", err) + log.Infof("Could not get apiserver dynamic client: %v", err) return err } - c.VPAClient, err = getKubeVPAClient(time.Duration(c.timeoutSeconds) * time.Second) + c.VPAInformerClient, err = getKubeVPAClient(c.defaultInformerTimeout) if err != nil { log.Infof("Could not get apiserver vpa client: %v", err) return err } - c.CRDClient, err = getCRDClient(time.Duration(c.timeoutSeconds) * time.Second) + c.CRDInformerClient, err = getCRDClient(c.defaultInformerTimeout) if err != nil { log.Infof("Could not get apiserver CRDClient client: %v", err) return err } - c.APISClient, err = getAPISClient(time.Duration(c.timeoutSeconds) * time.Second) + c.APISInformerClient, err = getAPISClient(c.defaultInformerTimeout) if err != nil { log.Infof("Could not get apiserver APISClient client: %v", err) return err } + // Creating informers + c.InformerFactory = c.GetInformerWithOptions(nil) + if config.Datadog.GetBool("admission_controller.enabled") || config.Datadog.GetBool("compliance_config.enabled") || config.Datadog.GetBool("orchestrator_explorer.enabled") || + 
config.Datadog.GetBool("external_metrics_provider.use_datadogmetric_crd") || + config.Datadog.GetBool("external_metrics_provider.wpa_controller") || config.Datadog.GetBool("cluster_checks.enabled") { - c.DynamicCl, err = getKubeDynamicClient(time.Duration(c.timeoutSeconds) * time.Second) - if err != nil { - log.Infof("Could not get apiserver dynamic client: %v", err) - return err - } - } - - // informer factory uses its own clientset with a larger timeout - c.InformerFactory, err = getInformerFactory() - if err != nil { - return err - } - - if config.Datadog.GetBool("orchestrator_explorer.enabled") { - tweakListOptions := func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", "").String() - } - c.UnassignedPodInformerFactory, err = getInformerFactoryWithOption( - informers.WithTweakListOptions(tweakListOptions), - ) - if err != nil { - log.Infof("Could not get informer factory: %v", err) - return err - } - if c.CRDInformerFactory, err = getCRDInformerFactory(); err != nil { - _ = log.Errorf("Error getting crd informer Client: %s", err.Error()) - return err - } - if c.DynamicInformerFactory, err = getDDInformerFactory(); err != nil { - _ = log.Errorf("Error getting datadoghq informer Client: %s", err.Error()) - return err - } - - c.VPAInformerFactory, err = getVPAInformerFactory(c.VPAClient) - if err != nil { - log.Infof("Could not get a vpa informer factory: %v", err) - return err - } - + c.DynamicInformerFactory = dynamicinformer.NewDynamicSharedInformerFactory(c.DynamicInformerCl, c.defaultInformerResyncPeriod) } if config.Datadog.GetBool("admission_controller.enabled") { @@ -411,48 +337,21 @@ func (c *APIClient) connect() error { optionsForService := func(options *metav1.ListOptions) { options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, config.Datadog.GetString("admission_controller.certificate.secret_name")).String() } - c.CertificateSecretInformerFactory, err = getInformerFactoryWithOption( + 
c.CertificateSecretInformerFactory = c.GetInformerWithOptions( + nil, informers.WithTweakListOptions(optionsForService), informers.WithNamespace(common.GetResourcesNamespace()), ) - if err != nil { - log.Infof("Could not get informer factory: %v", err) - return err - } optionsForWebhook := func(options *metav1.ListOptions) { options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, config.Datadog.GetString("admission_controller.webhook_name")).String() } - c.WebhookConfigInformerFactory, err = getInformerFactoryWithOption( + c.WebhookConfigInformerFactory = c.GetInformerWithOptions( + nil, informers.WithTweakListOptions(optionsForWebhook), ) - if err != nil { - log.Infof("Could not get informer factory: %v", err) - return err - } - } - if config.Datadog.GetBool("external_metrics_provider.wpa_controller") { - if c.WPAInformerFactory, err = getWPAInformerFactory(); err != nil { - log.Errorf("Error getting WPA Informer Factory: %s", err.Error()) - return err - } - if c.WPAClient, err = getKubeDynamicClient(time.Duration(c.timeoutSeconds) * time.Second); err != nil { - log.Errorf("Error getting WPA Client: %s", err.Error()) - return err - } - } - if config.Datadog.GetBool("external_metrics_provider.use_datadogmetric_crd") { - if c.DynamicInformerFactory, err = getDDInformerFactory(); err != nil { - log.Errorf("Error getting datadoghq Client: %s", err.Error()) - return err - } - if c.DDClient, err = getDDClient(time.Duration(c.timeoutSeconds) * time.Second); err != nil { - log.Errorf("Error getting datadoghq Informer Factory: %s", err.Error()) - return err - } - } // Try to get apiserver version to confim connectivity APIversion := c.Cl.Discovery().RESTClient().APIVersion() if APIversion.Empty() { @@ -478,7 +377,7 @@ func newMetadataMapperBundle() *metadataMapperBundle { // ComponentStatuses returns the component status list from the APIServer func (c *APIClient) ComponentStatuses() (*v1.ComponentStatusList, error) { - return 
c.Cl.CoreV1().ComponentStatuses().List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &c.timeoutSeconds}) + return c.Cl.CoreV1().ComponentStatuses().List(context.TODO(), metav1.ListOptions{TimeoutSeconds: pointer.Ptr(int64(c.defaultClientTimeout.Seconds()))}) } func (c *APIClient) getOrCreateConfigMap(name, namespace string) (cmEvent *v1.ConfigMap, err error) { @@ -639,7 +538,7 @@ func getMetadataMapBundle(nodeName string) (*metadataMapperBundle, error) { } func getNodeList(cl *APIClient) ([]v1.Node, error) { - nodes, err := cl.Cl.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &cl.timeoutSeconds}) + nodes, err := cl.Cl.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{TimeoutSeconds: pointer.Ptr(int64(cl.defaultClientTimeout.Seconds()))}) if err != nil { log.Errorf("Can't list nodes from the API server: %s", err.Error()) return nil, err diff --git a/pkg/util/kubernetes/apiserver/apiserver_kubelet.go b/pkg/util/kubernetes/apiserver/apiserver_kubelet.go index ec0e13cdfd461..ba936ab545faf 100644 --- a/pkg/util/kubernetes/apiserver/apiserver_kubelet.go +++ b/pkg/util/kubernetes/apiserver/apiserver_kubelet.go @@ -16,13 +16,14 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) // NodeMetadataMapping only fetch the endpoints from Kubernetes apiserver and add the metadataMapper of the // node to the cache // Only called when the node agent computes the metadata mapper locally and does not rely on the DCA. 
func (c *APIClient) NodeMetadataMapping(nodeName string, pods []*kubelet.Pod) error { - endpointList, err := c.Cl.CoreV1().Endpoints("").List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &c.timeoutSeconds, ResourceVersion: "0"}) + endpointList, err := c.Cl.CoreV1().Endpoints("").List(context.TODO(), metav1.ListOptions{TimeoutSeconds: pointer.Ptr(int64(c.defaultClientTimeout.Seconds())), ResourceVersion: "0"}) if err != nil { log.Errorf("Could not collect endpoints from the API Server: %q", err.Error()) return err diff --git a/pkg/util/kubernetes/apiserver/controllers.go b/pkg/util/kubernetes/apiserver/controllers.go index 796949a09660c..90c9511138ac7 100644 --- a/pkg/util/kubernetes/apiserver/controllers.go +++ b/pkg/util/kubernetes/apiserver/controllers.go @@ -53,16 +53,14 @@ var controllerCatalog = map[controllerName]controllerFuncs{ // ControllerContext holds all the attributes needed by the controllers type ControllerContext struct { - informers map[InformerName]cache.SharedInformer - InformerFactory informers.SharedInformerFactory - WPAClient dynamic.Interface - WPAInformerFactory dynamicinformer.DynamicSharedInformerFactory - DDClient dynamic.Interface - DDInformerFactory dynamicinformer.DynamicSharedInformerFactory - Client kubernetes.Interface - IsLeaderFunc func() bool - EventRecorder record.EventRecorder - StopCh chan struct{} + informers map[InformerName]cache.SharedInformer + InformerFactory informers.SharedInformerFactory + DynamicClient dynamic.Interface + DynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory + Client kubernetes.Interface + IsLeaderFunc func() bool + EventRecorder record.EventRecorder + StopCh chan struct{} } // StartControllers runs the enabled Kubernetes controllers for the Datadog Cluster Agent. 
This is @@ -146,8 +144,9 @@ func startAutoscalersController(ctx ControllerContext, c chan error) { c <- err return } - if ctx.WPAInformerFactory != nil { - go autoscalersController.RunWPA(ctx.StopCh, ctx.WPAClient, ctx.WPAInformerFactory) + + if config.Datadog.GetBool("external_metrics_provider.wpa_controller") { + go autoscalersController.RunWPA(ctx.StopCh, ctx.DynamicClient, ctx.DynamicInformerFactory) } autoscalersController.enableHPA(ctx.Client, ctx.InformerFactory) diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go index d0991bac3b6b9..b428b63b2fb83 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go @@ -153,7 +153,7 @@ func (le *LeaderEngine) init() error { return err } - serverVersion, err := common.KubeServerVersion(apiClient.DiscoveryCl, 10*time.Second) + serverVersion, err := common.KubeServerVersion(apiClient.Cl.Discovery(), 10*time.Second) if err == nil && semver.IsValid(serverVersion.String()) && semver.Compare(serverVersion.String(), "v1.14.0") < 0 { log.Warn("[DEPRECATION WARNING] DataDog will drop support of Kubernetes older than v1.14. 
Please update to a newer version to ensure proper functionality and security.") } @@ -161,7 +161,7 @@ func (le *LeaderEngine) init() error { le.coreClient = apiClient.Cl.CoreV1() le.coordClient = apiClient.Cl.CoordinationV1() - usingLease, err := CanUseLeases(apiClient.DiscoveryCl) + usingLease, err := CanUseLeases(apiClient.Cl.Discovery()) if err != nil { log.Errorf("Unable to retrieve available resources: %v", err) return err @@ -349,7 +349,7 @@ func GetLeaderElectionRecord() (leaderDetails rl.LeaderElectionRecord, err error if err != nil { return led, err } - usingLease, err := CanUseLeases(client.DiscoveryCl) + usingLease, err := CanUseLeases(client.Cl.Discovery()) if err != nil { return led, err } diff --git a/pkg/util/kubernetes/apiserver/metadata_controller_test.go b/pkg/util/kubernetes/apiserver/metadata_controller_test.go index 6d99243e3e050..ab8724fc1c0fa 100644 --- a/pkg/util/kubernetes/apiserver/metadata_controller_test.go +++ b/pkg/util/kubernetes/apiserver/metadata_controller_test.go @@ -450,7 +450,7 @@ func TestMetadataController(t *testing.T) { return true }) - cl := &APIClient{Cl: client, timeoutSeconds: 5} + cl := &APIClient{Cl: client, defaultClientTimeout: 5} testutil.AssertTrueBeforeTimeout(t, 100*time.Millisecond, 2*time.Second, func() bool { fullmapper, errList := GetMetadataMapBundleOnAllNodes(cl) @@ -465,7 +465,6 @@ func TestMetadataController(t *testing.T) { assert.Contains(t, services, "nginx-1") return true }) - } func newFakeMetadataController(client kubernetes.Interface) (*MetadataController, informers.SharedInformerFactory) { diff --git a/pkg/util/kubernetes/apiserver/roundtrip.go b/pkg/util/kubernetes/apiserver/roundtrip.go index 0b7364f8b4feb..dafdcbe00a464 100644 --- a/pkg/util/kubernetes/apiserver/roundtrip.go +++ b/pkg/util/kubernetes/apiserver/roundtrip.go @@ -14,7 +14,6 @@ import ( "net/http" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -27,10 +26,10 @@ type 
CustomRoundTripper struct { // NewCustomRoundTripper creates a new CustomRoundTripper with the apiserver timeout value already populated from the // agent config, wrapping an existing http.RoundTripper. -func NewCustomRoundTripper(rt http.RoundTripper) *CustomRoundTripper { +func NewCustomRoundTripper(rt http.RoundTripper, timeout time.Duration) *CustomRoundTripper { return &CustomRoundTripper{ rt: rt, - timeout: config.Datadog.GetInt64("kubernetes_apiserver_client_timeout"), + timeout: int64(timeout.Seconds()), } } diff --git a/pkg/util/system/cpu_mock.go b/pkg/util/system/cpu_mock.go new file mode 100644 index 0000000000000..92c0794c9fe9b --- /dev/null +++ b/pkg/util/system/cpu_mock.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build test + +package system + +const ( + // Arbitrary CPU count used for unit tests + defaultCPUCountUnitTest = 3 +) + +func init() { + hostCPUCount.Store(defaultCPUCountUnitTest) +} diff --git a/pkg/util/system/cpu_test.go b/pkg/util/system/cpu_test.go index 1e3b9396f64b7..443ee4135c227 100644 --- a/pkg/util/system/cpu_test.go +++ b/pkg/util/system/cpu_test.go @@ -3,6 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
+//go:build test + package system import ( @@ -32,6 +34,8 @@ func (f *fakeCPUCount) info(context.Context, bool) (int, error) { } func TestHostCPUCount(t *testing.T) { + defer hostCPUCount.Store(defaultCPUCountUnitTest) + f := newFakeCPUCount(10000, nil) assert.Equal(t, f.count, HostCPUCount()) diff --git a/releasenotes-dca/notes/fix-long-watch-short-cac006ec56e97970.yaml b/releasenotes-dca/notes/fix-long-watch-short-cac006ec56e97970.yaml new file mode 100644 index 0000000000000..c0bd96fefd947 --- /dev/null +++ b/releasenotes-dca/notes/fix-long-watch-short-cac006ec56e97970.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes a bug that would trigger unnecessary APIServer `List` requests from the Cluster Agent or Cluster Checks Runner. diff --git a/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml b/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml new file mode 100644 index 0000000000000..de69a7ffb6411 --- /dev/null +++ b/releasenotes/notes/bump-go-sqllexer-with-more-obfuscation-options-ca13fcbeb4c9b299.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + DBM: Add configuration options to SQL obfuscator to customize the normalization of SQL statements: + - ``KeepTrailingSemicolon`` - disable removing trailing semicolon. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. + - ``KeepIdentifierQuotation`` - disable removing quotation marks around identifiers. This option is only valid when ``ObfuscationMode`` is ``obfuscate_and_normalize``. 
diff --git a/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml b/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml new file mode 100644 index 0000000000000..5f89404e5666b --- /dev/null +++ b/releasenotes/notes/bump-s6-overlay-a4fa7ecd845fe093.yaml @@ -0,0 +1,3 @@ +other: + - | + Update s6-overlay version used in Datadog Agent container images to v2.2.0.3 diff --git a/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml b/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml new file mode 100644 index 0000000000000..0a3402ade73d0 --- /dev/null +++ b/releasenotes/notes/enable-status-code-aggregation-by-default-a9a70cd3af443439.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + USM will report the actual status code of the HTTP traffic, instead of reporting + only the status code family (2xx, 3xx, etc.). + diff --git a/releasenotes/notes/oracle-go-ora-2.81-9d9ad3b1c6f9904f.yaml b/releasenotes/notes/oracle-go-ora-2.81-9d9ad3b1c6f9904f.yaml new file mode 100644 index 0000000000000..f39bc069960b6 --- /dev/null +++ b/releasenotes/notes/oracle-go-ora-2.81-9d9ad3b1c6f9904f.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Bug fix for the Korean character set for Windows. 
diff --git a/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml b/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml index 2c17b344130f6..1b6f312ea1017 100644 --- a/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml +++ b/releasenotes/notes/process-agent-improve-parse-stat-content-allocations-8c6fd48fb63649f1.yaml @@ -1,3 +1,4 @@ --- enhancements: - - Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) + - | + Process-Agent: Improved parsing performance of the '/proc/pid/stat' file (Linux only) diff --git a/repository.datadog.yml b/repository.datadog.yml index d029388b323a6..5ea0d3f7aaa8e 100644 --- a/repository.datadog.yml +++ b/repository.datadog.yml @@ -7,3 +7,10 @@ workflow_type: speculative speculative_max_depth: 3 wait_for_check_timeout_in_minutes: 240 gitlab_jobs_retry_enable: true +--- +schema-version: v1 +kind: buildimpactanalysis +team: ci-interfaces +preprocess: true +enabled_strategies: + - go_strategy diff --git a/tasks/__init__.py b/tasks/__init__.py index a7d50df7a79ca..b6f93d67abe3d 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -32,6 +32,7 @@ system_probe, systray, trace_agent, + updater, vscode, ) from .build_tags import audit_tag_impact, print_default_build_tags @@ -153,6 +154,7 @@ ns.add_collection(fakeintake) ns.add_collection(kmt) ns.add_collection(diff) +ns.add_collection(updater) ns.configure( { 'run': { diff --git a/tasks/agent.py b/tasks/agent.py index e2d358e1b2e7d..6a606f87bf1d4 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -17,7 +17,6 @@ from invoke.exceptions import Exit, ParseError from .build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags -from .docker_tasks import pull_base_images from .flavor import AgentFlavor from .go import deps from .process_agent import build as process_agent_build @@ -74,6 +73,7 @@ WINDOWS_CORECHECKS = 
[ "agentcrashdetect", + "windows_registry", "winkmem", "wincrashdetect", ] @@ -314,7 +314,7 @@ def system_tests(_): @task -def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_tests=False, signed_pull=True): +def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_tests=False, tag=None, push=False): """ Build the docker image """ @@ -336,9 +336,11 @@ def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_ raise Exit(code=1) latest_file = max(list_of_files, key=os.path.getctime) shutil.copy2(latest_file, build_context) - # Pull base image with content trust enabled - pull_base_images(ctx, dockerfile_path, signed_pull) - common_build_opts = f"-t {AGENT_TAG} -f {dockerfile_path}" + + if tag is None: + tag = AGENT_TAG + + common_build_opts = f"-t {tag} -f {dockerfile_path}" if python_version not in BOTH_VERSIONS: common_build_opts = f"{common_build_opts} --build-arg PYTHON_VERSION={python_version}" @@ -348,6 +350,9 @@ def image_build(ctx, arch='amd64', base_dir="omnibus", python_version="2", skip_ # Build with the release target ctx.run(f"docker build {common_build_opts} --platform linux/{arch} --target release {build_context}") + if push: + ctx.run(f"docker push {tag}") + ctx.run(f"rm {build_context}/{deb_glob}") diff --git a/tasks/kmt.py b/tasks/kmt.py index 626f5c87619bd..3f7b3af74a200 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -104,7 +104,7 @@ def init(ctx, lite=False): @task def update_resources(ctx, no_backup=False): warn("Updating resource dependencies will delete all running stacks.") - if ask("are you sure you want to continue? (Y/n)") != "Y": + if ask("are you sure you want to continue? 
(y/n)").lower() != "y": raise Exit("[-] Update aborted") for stack in glob(f"{KMT_STACKS_DIR}/*"): @@ -117,7 +117,7 @@ def update_resources(ctx, no_backup=False): @task def revert_resources(ctx): warn("Reverting resource dependencies will delete all running stacks.") - if ask("are you sure you want to revert to backups? (Y/n)") != "Y": + if ask("are you sure you want to revert to backups? (y/n)").lower() != "y": raise Exit("[-] Revert aborted") for stack in glob(f"{KMT_STACKS_DIR}/*"): @@ -208,7 +208,7 @@ def sync(ctx, vms, stack=None, ssh_key=""): for _, vm, ip in target_vms: info(f" Syncing VM {vm} with ip {ip}") - if ask("Do you want to sync? (y/n)") != "y": + if ask("Do you want to sync? (y/n)").lower() != "y": warn("[-] Sync aborted !") return diff --git a/tasks/libs/junit_upload.py b/tasks/libs/junit_upload.py index 0cafe0ead7125..8f5e2414940c8 100644 --- a/tasks/libs/junit_upload.py +++ b/tasks/libs/junit_upload.py @@ -78,9 +78,9 @@ def split_junitxml(xml_path, codeowners, output_dir): return list(output_xmls), flavor -def create_upload_junitxmls_processes(output_dir, owners, flavor, xmlfile_name, process_env, additional_tags=None): +def upload_junitxmls(output_dir, owners, flavor, xmlfile_name, process_env, additional_tags=None): """ - Spawn process to upload all per-team split JUnit XMLs from given directory. + Upload all per-team split JUnit XMLs from given directory. 
""" processes = [] @@ -113,7 +113,10 @@ def create_upload_junitxmls_processes(output_dir, owners, flavor, xmlfile_name, args.extend(additional_tags) args.append(junit_file_path) processes.append(subprocess.Popen(DATADOG_CI_COMMAND + args, bufsize=-1, env=process_env)) - return processes + for process in processes: + exit_code = process.wait() + if exit_code != 0: + raise subprocess.CalledProcessError(exit_code, DATADOG_CI_COMMAND) def junit_upload_from_tgz(junit_tgz, codeowners_path=".github/CODEOWNERS"): @@ -147,33 +150,14 @@ def junit_upload_from_tgz(junit_tgz, codeowners_path=".github/CODEOWNERS"): # for each unpacked xml file, split it and submit all parts # NOTE: recursive=True is necessary for "**" to unpack into 0-n dirs, not just 1 xmls = 0 - processes = [] - tempdirs = [] for xmlfile in glob.glob(f"{unpack_dir}/**/*.xml", recursive=True): if not os.path.isfile(xmlfile): print(f"[WARN] Matched folder named {xmlfile}") continue xmls += 1 - output_dir = tempfile.TemporaryDirectory() - written_owners, flavor = split_junitxml(xmlfile, codeowners, output_dir.name) - processes.extend( - create_upload_junitxmls_processes( - output_dir.name, written_owners, flavor, xmlfile.split("/")[-1], process_env, tags - ) - ) - tempdirs.append(output_dir) - - # wait for the processes created to finish - try: - for process in processes: - exit_code = process.wait() - if exit_code != 0: - raise subprocess.CalledProcessError(exit_code, DATADOG_CI_COMMAND) - finally: - # ensure the temporary directories created for each xml files are cleaned up - for dir in tempdirs: - dir.cleanup() - + with tempfile.TemporaryDirectory() as output_dir: + written_owners, flavor = split_junitxml(xmlfile, codeowners, output_dir) + upload_junitxmls(output_dir, written_owners, flavor, xmlfile.split("/")[-1], process_env, tags) xmlcounts[junit_tgz] = xmls empty_tgzs = [] diff --git a/tasks/modules.py b/tasks/modules.py index a04b7ab378211..6995cae67cbe6 100644 --- a/tasks/modules.py +++ 
b/tasks/modules.py @@ -164,10 +164,13 @@ def dependency_path(self, agent_version): "pkg/metrics": GoModule("pkg/metrics", independent=True), "pkg/telemetry": GoModule("pkg/telemetry", independent=True), "comp/core/flare/types": GoModule("comp/core/flare/types", independent=True), + "comp/core/config": GoModule("comp/core/config", independent=True), "comp/core/secrets": GoModule("comp/core/secrets", independent=True), "comp/core/telemetry": GoModule("comp/core/telemetry", independent=True), + "cmd/agent/common/path": GoModule("cmd/agent/common/path", independent=True), "pkg/config/model": GoModule("pkg/config/model", independent=True), "pkg/config/env": GoModule("pkg/config/env", independent=True), + "pkg/config/setup": GoModule("pkg/config/setup", independent=True), "pkg/config/logs": GoModule("pkg/config/logs", independent=True), "pkg/config/remote": GoModule("pkg/config/remote", independent=True), "pkg/security/secl": GoModule("pkg/security/secl", independent=True), @@ -188,6 +191,7 @@ def dependency_path(self, agent_version): "pkg/util/filesystem": GoModule("pkg/util/filesystem", independent=True), "pkg/util/fxutil": GoModule("pkg/util/fxutil", independent=True), "pkg/util/buf": GoModule("pkg/util/buf", independent=True), + "pkg/util/hostname/validate": GoModule("pkg/util/hostname/validate", independent=True), "pkg/util/json": GoModule("pkg/util/json", independent=True), "pkg/util/sort": GoModule("pkg/util/sort", independent=True), "pkg/util/optional": GoModule("pkg/util/optional", independent=True), diff --git a/tasks/security_agent.py b/tasks/security_agent.py index c18673a684d42..8b7d00502a29b 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -708,7 +708,7 @@ def generate_cws_proto(ctx): def get_git_dirty_files(): - dirty_stats = check_output(["git", "status", "--porcelain=v1", "untracked-files=no"]).decode('utf-8') + dirty_stats = check_output(["git", "status", "--porcelain=v1", "--untracked-files=no"]).decode('utf-8') paths = [] # see 
https://git-scm.com/docs/git-status#_short_format for format documentation diff --git a/tasks/updater.py b/tasks/updater.py new file mode 100644 index 0000000000000..c0808c4ab5efd --- /dev/null +++ b/tasks/updater.py @@ -0,0 +1,194 @@ +""" +Updater namespaced tasks +""" + + +import os +import sys + +from invoke import task + +from .build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags +from .go import deps +from .utils import REPO_PATH, bin_name, get_build_flags, get_version, load_release_versions, timed + +BIN_PATH = os.path.join(".", "bin", "updater") +MAJOR_VERSION = '7' + + +@task +def build( + ctx, + rebuild=False, + race=False, + build_include=None, + build_exclude=None, + arch="x64", + go_mod="mod", +): + """ + Build the updater. + """ + + ldflags, gcflags, env = get_build_flags(ctx, major_version=MAJOR_VERSION) + + build_include = ( + get_default_build_tags( + build="updater", + ) # TODO/FIXME: Arch not passed to preserve build tags. Should this be fixed? 
+ if build_include is None + else filter_incompatible_tags(build_include.split(","), arch=arch) + ) + build_exclude = [] if build_exclude is None else build_exclude.split(",") + + build_tags = get_build_tags(build_include, build_exclude) + + race_opt = "-race" if race else "" + build_type = "-a" if rebuild else "" + go_build_tags = " ".join(build_tags) + updater_bin = os.path.join(BIN_PATH, bin_name("updater")) + cmd = f"go build -mod={go_mod} {race_opt} {build_type} -tags \"{go_build_tags}\" " + cmd += f"-o {updater_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/updater" + + ctx.run(cmd, env=env) + + +def get_omnibus_env( + ctx, + skip_sign=False, + release_version="nightly", + hardened_runtime=False, + go_mod_cache=None, +): + env = load_release_versions(ctx, release_version) + + # If the host has a GOMODCACHE set, try to reuse it + if not go_mod_cache and os.environ.get('GOMODCACHE'): + go_mod_cache = os.environ.get('GOMODCACHE') + + if go_mod_cache: + env['OMNIBUS_GOMODCACHE'] = go_mod_cache + + env['OMNIBUS_OPENSSL_SOFTWARE'] = 'openssl3' + + env_override = ['INTEGRATIONS_CORE_VERSION', 'OMNIBUS_SOFTWARE_VERSION'] + for key in env_override: + value = os.environ.get(key) + # Only overrides the env var if the value is a non-empty string. 
+ if value: + env[key] = value + + if sys.platform == 'darwin': + # Target MacOS 10.12 + env['MACOSX_DEPLOYMENT_TARGET'] = '10.12' + + if skip_sign: + env['SKIP_SIGN_MAC'] = 'true' + if hardened_runtime: + env['HARDENED_RUNTIME_MAC'] = 'true' + + env['PACKAGE_VERSION'] = get_version( + ctx, include_git=True, url_safe=True, major_version=MAJOR_VERSION, include_pipeline_id=True + ) + env['MAJOR_VERSION'] = MAJOR_VERSION + + return env + + +def omnibus_run_task(ctx, task, target_project, base_dir, env, omnibus_s3_cache=False, log_level="info"): + with ctx.cd("omnibus"): + overrides_cmd = "" + if base_dir: + overrides_cmd = f"--override=base_dir:{base_dir}" + + omnibus = "bundle exec omnibus" + if omnibus_s3_cache: + populate_s3_cache = "--populate-s3-cache" + else: + populate_s3_cache = "" + + cmd = "{omnibus} {task} {project_name} --log-level={log_level} {populate_s3_cache} {overrides}" + args = { + "omnibus": omnibus, + "task": task, + "project_name": target_project, + "log_level": log_level, + "overrides": overrides_cmd, + "populate_s3_cache": populate_s3_cache, + } + + ctx.run(cmd.format(**args), env=env) + + +def bundle_install_omnibus(ctx, gem_path=None, env=None): + with ctx.cd("omnibus"): + # make sure bundle install starts from a clean state + try: + os.remove("Gemfile.lock") + except Exception: + pass + + cmd = "bundle install" + if gem_path: + cmd += f" --path {gem_path}" + ctx.run(cmd, env=env) + + +# hardened-runtime needs to be set to False to build on MacOS < 10.13.6, as the -o runtime option is not supported. 
+@task( + help={ + 'skip-sign': "On macOS, use this option to build an unsigned package if you don't have Datadog's developer keys.", + 'hardened-runtime': "On macOS, use this option to enforce the hardened runtime setting, adding '-o runtime' to all codesign commands", + } +) +def omnibus_build( + ctx, + log_level="info", + base_dir=None, + gem_path=None, + skip_deps=False, + skip_sign=False, + release_version="nightly", + omnibus_s3_cache=False, + hardened_runtime=False, + go_mod_cache=None, +): + """ + Build the Agent packages with Omnibus Installer. + """ + if not skip_deps: + with timed(quiet=True) as deps_elapsed: + deps(ctx) + + # base dir (can be overridden through env vars, command line takes precedence) + base_dir = base_dir or os.environ.get("OMNIBUS_BASE_DIR") + + env = get_omnibus_env( + ctx, + skip_sign=skip_sign, + release_version=release_version, + hardened_runtime=hardened_runtime, + go_mod_cache=go_mod_cache, + ) + + target_project = "updater" + + with timed(quiet=True) as bundle_elapsed: + bundle_install_omnibus(ctx, gem_path, env) + + with timed(quiet=True) as omnibus_elapsed: + omnibus_run_task( + ctx=ctx, + task="build", + target_project=target_project, + base_dir=base_dir, + env=env, + omnibus_s3_cache=omnibus_s3_cache, + log_level=log_level, + ) + + print("Build component timing:") + if not skip_deps: + print(f"Deps: {deps_elapsed.duration}") + print(f"Bundle: {bundle_elapsed.duration}") + print(f"Omnibus: {omnibus_elapsed.duration}") diff --git a/tasks/utils.py b/tasks/utils.py index 4c9029aad66e5..549f7ba300112 100644 --- a/tasks/utils.py +++ b/tasks/utils.py @@ -165,9 +165,9 @@ def get_build_flags( # If we're not building with both Python, we want to force the use of DefaultPython if not has_both_python(python_runtimes): - ldflags += f"-X {REPO_PATH}/pkg/config.ForceDefaultPython=true " + ldflags += f"-X {REPO_PATH}/pkg/config/setup.ForceDefaultPython=true " - ldflags += f"-X 
{REPO_PATH}/pkg/config.DefaultPython={get_default_python(python_runtimes)} " + ldflags += f"-X {REPO_PATH}/pkg/config/setup.DefaultPython={get_default_python(python_runtimes)} " # adding rtloader libs and headers to the env if rtloader_lib: diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 44f59e24b6dc5..afd5d527093e5 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -1,8 +1,8 @@ kubernetes==28.1.0 -datadog-api-client==2.19.0 +datadog-api-client==2.20.0 pyaml==23.9.7 -docker==6.1.3 +docker==7.0.0 retry==0.9.2 -emoji==2.8.0 +emoji==2.9.0 requests==2.31.0 jsonschema==4.20.0 \ No newline at end of file diff --git a/test/fakeintake/aggregator/common.go b/test/fakeintake/aggregator/common.go index 7e0bc589055de..50500b2023118 100644 --- a/test/fakeintake/aggregator/common.go +++ b/test/fakeintake/aggregator/common.go @@ -11,6 +11,7 @@ import ( "compress/zlib" "io" "sort" + "sync" "time" "github.com/DataDog/datadog-agent/test/fakeintake/api" @@ -29,6 +30,8 @@ type parseFunc[P PayloadItem] func(payload api.Payload) (items []P, err error) type Aggregator[P PayloadItem] struct { payloadsByName map[string][]P parse parseFunc[P] + + mutex sync.RWMutex } const ( @@ -43,42 +46,40 @@ func newAggregator[P PayloadItem](parse parseFunc[P]) Aggregator[P] { return Aggregator[P]{ payloadsByName: map[string][]P{}, parse: parse, + mutex: sync.RWMutex{}, } } // UnmarshallPayloads aggregate the payloads func (agg *Aggregator[P]) UnmarshallPayloads(payloads []api.Payload) error { - // reset map - agg.Reset() - // build map + // build new map + payloadsByName := map[string][]P{} for _, p := range payloads { payloads, err := agg.parse(p) if err != nil { return err } + for _, item := range payloads { - if _, found := agg.payloadsByName[item.name()]; !found { - agg.payloadsByName[item.name()] = []P{} + if _, found := payloadsByName[item.name()]; !found { + payloadsByName[item.name()] = []P{} } - 
agg.payloadsByName[item.name()] = append(agg.payloadsByName[item.name()], item) + payloadsByName[item.name()] = append(payloadsByName[item.name()], item) } } + agg.replace(payloadsByName) return nil } // ContainsPayloadName return true if name match one of the payloads func (agg *Aggregator[P]) ContainsPayloadName(name string) bool { - _, found := agg.payloadsByName[name] - return found + return len(agg.GetPayloadsByName(name)) != 0 } // ContainsPayloadNameAndTags return true if the payload name exist and on of the payloads contains all the tags func (agg *Aggregator[P]) ContainsPayloadNameAndTags(name string, tags []string) bool { - payloads, found := agg.payloadsByName[name] - if !found { - return false - } + payloads := agg.GetPayloadsByName(name) for _, payloadItem := range payloads { if AreTagsSubsetOfOtherTags(tags, payloadItem.GetTags()) { @@ -91,11 +92,18 @@ func (agg *Aggregator[P]) ContainsPayloadNameAndTags(name string, tags []string) // GetNames return the names of the payloads func (agg *Aggregator[P]) GetNames() []string { - names := []string{} + names := agg.getNamesUnsorted() + sort.Strings(names) + return names +} + +func (agg *Aggregator[P]) getNamesUnsorted() []string { + agg.mutex.RLock() + defer agg.mutex.RUnlock() + names := make([]string, 0, len(agg.payloadsByName)) for name := range agg.payloadsByName { names = append(names, name) } - sort.Strings(names) return names } @@ -126,14 +134,32 @@ func getReadCloserForEncoding(payload []byte, encoding string) (rc io.ReadCloser // GetPayloadsByName return the payloads for the resource name func (agg *Aggregator[P]) GetPayloadsByName(name string) []P { - return agg.payloadsByName[name] + agg.mutex.RLock() + defer agg.mutex.RUnlock() + payloads := agg.payloadsByName[name] + return payloads } // Reset the aggregation func (agg *Aggregator[P]) Reset() { + agg.mutex.Lock() + defer agg.mutex.Unlock() + agg.unsafeReset() +} + +func (agg *Aggregator[P]) unsafeReset() { agg.payloadsByName = map[string][]P{} 
} +func (agg *Aggregator[P]) replace(payloadsByName map[string][]P) { + agg.mutex.Lock() + defer agg.mutex.Unlock() + agg.unsafeReset() + for name, payloads := range payloadsByName { + agg.payloadsByName[name] = payloads + } +} + // FilterByTags return the payloads that match all the tags func FilterByTags[P PayloadItem](payloads []P, tags []string) []P { ret := []P{} diff --git a/test/fakeintake/aggregator/common_test.go b/test/fakeintake/aggregator/common_test.go index 0caee382a5b9f..2deff16c3799a 100644 --- a/test/fakeintake/aggregator/common_test.go +++ b/test/fakeintake/aggregator/common_test.go @@ -7,6 +7,7 @@ package aggregator import ( "encoding/json" "runtime" + "sync" "testing" "time" @@ -66,7 +67,7 @@ func generateTestData() (data []api.Payload, err error) { }, nil } -func validateCollectionTime(t *testing.T, agg Aggregator[*mockPayloadItem]) { +func validateCollectionTime(t *testing.T, agg *Aggregator[*mockPayloadItem]) { if runtime.GOOS != "linux" { t.Logf("validateCollectionTime test skip on %s", runtime.GOOS) return @@ -80,26 +81,28 @@ func validateCollectionTime(t *testing.T, agg Aggregator[*mockPayloadItem]) { func TestCommonAggregator(t *testing.T) { t.Run("ContainsPayloadName", func(t *testing.T) { + agg := newAggregator(parseMockPayloadItem) + assert.False(t, agg.ContainsPayloadName("totoro")) data, err := generateTestData() require.NoError(t, err) - agg := newAggregator(parseMockPayloadItem) err = agg.UnmarshallPayloads(data) assert.NoError(t, err) assert.True(t, agg.ContainsPayloadName("totoro")) assert.False(t, agg.ContainsPayloadName("ponyo")) - validateCollectionTime(t, agg) + validateCollectionTime(t, &agg) }) t.Run("ContainsPayloadNameAndTags", func(t *testing.T) { + agg := newAggregator(parseMockPayloadItem) + assert.False(t, agg.ContainsPayloadNameAndTags("totoro", []string{"age:123"})) data, err := generateTestData() require.NoError(t, err) - agg := newAggregator(parseMockPayloadItem) err = agg.UnmarshallPayloads(data) 
assert.NoError(t, err) assert.True(t, agg.ContainsPayloadNameAndTags("totoro", []string{"age:123"})) assert.False(t, agg.ContainsPayloadNameAndTags("porco rosso", []string{"country:it", "role:king"})) assert.True(t, agg.ContainsPayloadNameAndTags("porco rosso", []string{"country:it", "role:pilot"})) - validateCollectionTime(t, agg) + validateCollectionTime(t, &agg) }) t.Run("AreTagsSubsetOfOtherTags", func(t *testing.T) { @@ -127,11 +130,39 @@ func TestCommonAggregator(t *testing.T) { }) t.Run("Reset", func(t *testing.T) { - _, err := generateTestData() + data, err := generateTestData() require.NoError(t, err) agg := newAggregator(parseMockPayloadItem) + err = agg.UnmarshallPayloads(data) + require.NoError(t, err) + assert.NotEmpty(t, agg.payloadsByName) agg.Reset() - assert.Equal(t, 0, len(agg.payloadsByName)) - validateCollectionTime(t, agg) + assert.Empty(t, agg.payloadsByName) + }) + + t.Run("Thread safe", func(t *testing.T) { + var wg sync.WaitGroup + data, err := generateTestData() + require.NoError(t, err) + agg := newAggregator(parseMockPayloadItem) + // add some data to ensure we have names + err = agg.UnmarshallPayloads(data) + assert.NoError(t, err) + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + err := agg.UnmarshallPayloads(data) + assert.NoError(t, err) + } + }() + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + names := agg.GetNames() + assert.NotEmpty(t, names) + } + }() + wg.Wait() }) } diff --git a/test/fakeintake/api/api.go b/test/fakeintake/api/api.go index 1372d589ad640..6e8c170cff145 100644 --- a/test/fakeintake/api/api.go +++ b/test/fakeintake/api/api.go @@ -48,5 +48,6 @@ type ResponseOverride struct { Endpoint string `json:"endpoint"` StatusCode int `json:"status_code"` ContentType string `json:"content_type"` + Method string `json:"method"` Body []byte `json:"body"` } diff --git a/test/fakeintake/go.mod b/test/fakeintake/go.mod index 05e58e481508f..cba5096ea852c 100644 --- a/test/fakeintake/go.mod +++ 
b/test/fakeintake/go.mod @@ -11,7 +11,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/prometheus/client_golang v1.17.0 github.com/spf13/cobra v1.8.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 ) require ( diff --git a/test/fakeintake/go.sum b/test/fakeintake/go.sum index 11da9b39e6d5f..5e51030eb6142 100644 --- a/test/fakeintake/go.sum +++ b/test/fakeintake/go.sum @@ -18,7 +18,6 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM= @@ -60,13 +59,8 @@ github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -77,6 +71,5 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/fakeintake/server/body.go b/test/fakeintake/server/body.go index 5bbdd1d58582c..b4b8096a85711 100644 --- a/test/fakeintake/server/body.go +++ b/test/fakeintake/server/body.go @@ -21,9 +21,6 @@ type flareResponseBody struct { Error string `json:"error,omitempty"` } -// defaultResponse is the default response returned by the fakeintake server -var defaultResponse httpResponse - func getConnectionsResponse() []byte { clStatus := &agentmodel.CollectorStatus{ ActiveClients: 1, @@ -49,17 +46,29 @@ func getConnectionsResponse() []byte { // newResponseOverrides creates and returns a map of URL paths to HTTP responses populated with // static custom response overrides -func newResponseOverrides() map[string]httpResponse { - return map[string]httpResponse{ - "/support/flare": 
updateResponseFromData(httpResponse{ - statusCode: http.StatusOK, - contentType: "application/json", - data: flareResponseBody{CaseID: 0, Error: ""}, - }), - "/api/v1/connections": updateResponseFromData(httpResponse{ - statusCode: http.StatusOK, - contentType: "application/x-protobuf", - data: getConnectionsResponse(), - }), +func newResponseOverrides() map[string]map[string]httpResponse { + return map[string]map[string]httpResponse{ + http.MethodPost: { + "/api/v1/connections": updateResponseFromData(httpResponse{ + statusCode: http.StatusOK, + contentType: "application/x-protobuf", + data: getConnectionsResponse(), + }), + }, + http.MethodGet: {}, + http.MethodConnect: {}, + http.MethodDelete: {}, + http.MethodHead: { + // Datadog Agent sends a HEAD request to avoid redirect issue before sending the actual flare + "/support/flare": updateResponseFromData(httpResponse{ + statusCode: http.StatusOK, + contentType: "application/json", + data: flareResponseBody{}, + }), + }, + http.MethodOptions: {}, + http.MethodPatch: {}, + http.MethodPut: {}, + http.MethodTrace: {}, } } diff --git a/test/fakeintake/server/http.go b/test/fakeintake/server/http.go index b881f42bcfe1f..615619f421e68 100644 --- a/test/fakeintake/server/http.go +++ b/test/fakeintake/server/http.go @@ -8,6 +8,7 @@ package server import ( "encoding/json" "net/http" + "sync" ) type httpResponse struct { @@ -46,8 +47,28 @@ func updateResponseFromData(r httpResponse) httpResponse { } } r.body = bodyJSON - } else { + } else if r.data != nil { r.body = r.data.([]byte) } return r } + +func isValidMethod(method string) bool { + var once sync.Once + var validMethods map[string]any + once.Do(func() { + validMethods = map[string]any{ + http.MethodGet: nil, + http.MethodPost: nil, + http.MethodConnect: nil, + http.MethodDelete: nil, + http.MethodHead: nil, + http.MethodPut: nil, + http.MethodPatch: nil, + http.MethodOptions: nil, + http.MethodTrace: nil, + } + }) + _, found := validMethods[method] + return found +} 
diff --git a/test/fakeintake/server/server.go b/test/fakeintake/server/server.go index 08fe54a0f4445..cf08f8e8d1ea3 100644 --- a/test/fakeintake/server/server.go +++ b/test/fakeintake/server/server.go @@ -35,15 +35,25 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" ) +// defaultResponse is the default response returned by the fakeintake server +var defaultResponseByMethod map[string]httpResponse + func init() { - defaultResponse = updateResponseFromData(httpResponse{ - statusCode: http.StatusOK, - contentType: "application/json", - data: errorResponseBody{Errors: []string{}}, - }) + defaultResponseByMethod = map[string]httpResponse{ + http.MethodGet: updateResponseFromData(httpResponse{ + statusCode: http.StatusOK, + }), + http.MethodPost: updateResponseFromData(httpResponse{ + statusCode: http.StatusOK, + contentType: "application/json", + data: errorResponseBody{ + Errors: make([]string, 0), + }, + }), + } } -//nolint:revive // TODO(APL) Fix revive linter +// Server is a struct implementing a fakeintake server type Server struct { server http.Server ready chan bool @@ -56,22 +66,22 @@ type Server struct { store *serverstore.Store - responseOverridesMutex sync.RWMutex - responseOverrides map[string]httpResponse + responseOverridesMutex sync.RWMutex + responseOverridesByMethod map[string]map[string]httpResponse } -// NewServer creates a new fake intake server and starts it on localhost:port +// NewServer creates a new fakeintake server and starts it on localhost:port // options accept WithPort and WithReadyChan. 
// Call Server.Start() to start the server in a separate go-routine // If the port is 0, a port number is automatically chosen func NewServer(options ...func(*Server)) *Server { fi := &Server{ - urlMutex: sync.RWMutex{}, - clock: clock.New(), - retention: 15 * time.Minute, - store: serverstore.NewStore(), - responseOverridesMutex: sync.RWMutex{}, - responseOverrides: newResponseOverrides(), + urlMutex: sync.RWMutex{}, + clock: clock.New(), + retention: 15 * time.Minute, + store: serverstore.NewStore(), + responseOverridesMutex: sync.RWMutex{}, + responseOverridesByMethod: newResponseOverrides(), } registry := prometheus.NewRegistry() @@ -127,7 +137,7 @@ func WithPort(port int) func(*Server) { } } -// WithReadyChannel assign a boolean channel to get notified when the server is ready. +// WithReadyChannel assign a boolean channel to get notified when the server is ready func WithReadyChannel(ready chan bool) func(*Server) { return func(fi *Server) { if fi.IsRunning() { @@ -138,7 +148,7 @@ func WithReadyChannel(ready chan bool) func(*Server) { } } -//nolint:revive // TODO(APL) Fix revive linter +// WithClock changes the clock used by the server func WithClock(clock clock.Clock) func(*Server) { return func(fi *Server) { if fi.IsRunning() { @@ -149,7 +159,7 @@ func WithClock(clock clock.Clock) func(*Server) { } } -//nolint:revive // TODO(APL) Fix revive linter +// WithRetention changes the retention time of payloads in the store func WithRetention(retention time.Duration) func(*Server) { return func(fi *Server) { if fi.IsRunning() { @@ -175,7 +185,7 @@ func (fi *Server) Start() { go fi.cleanUpPayloadsRoutine() } -//nolint:revive // TODO(APL) Fix revive linter +// URL returns the URL of the fakeintake server func (fi *Server) URL() string { fi.urlMutex.RLock() defer fi.urlMutex.RUnlock() @@ -188,7 +198,7 @@ func (fi *Server) setURL(url string) { fi.url = url } -//nolint:revive // TODO(APL) Fix revive linter +// IsRunning returns true if the fakeintake server is running 
func (fi *Server) IsRunning() bool { return fi.URL() != "" } @@ -259,42 +269,41 @@ func (fi *Server) handleDatadogRequest(w http.ResponseWriter, req *http.Request) log.Printf("Handling Datadog %s request to %s, header %v", req.Method, req.URL.Path, req.Header) - if req.Method == http.MethodGet { - writeHTTPResponse(w, httpResponse{ - statusCode: http.StatusOK, - }) - return - } - - // Datadog Agent sends a HEAD request to avoid redirect issue before sending the actual flare - if req.Method == http.MethodHead && req.URL.Path == "/support/flare" { - writeHTTPResponse(w, httpResponse{ - statusCode: http.StatusOK, - }) - return + switch req.Method { + case http.MethodPost: + err := fi.handleDatadogPostRequest(w, req) + if err == nil { + return + } + case http.MethodGet: + fallthrough + case http.MethodHead: + fallthrough + default: + if response, ok := fi.getResponseFromURLPath(req.Method, req.URL.Path); ok { + writeHTTPResponse(w, response) + return + } } - // from now on accept only POST requests - if req.Method != http.MethodPost { - response := buildErrorResponse(fmt.Errorf("invalid request with route %s and method %s", req.URL.Path, req.Method)) - writeHTTPResponse(w, response) - return - } + response := buildErrorResponse(fmt.Errorf("invalid request with route %s and method %s", req.URL.Path, req.Method)) + writeHTTPResponse(w, response) +} +func (fi *Server) handleDatadogPostRequest(w http.ResponseWriter, req *http.Request) error { if req.Body == nil { response := buildErrorResponse(errors.New("invalid request, nil body")) writeHTTPResponse(w, response) - return + return nil } payload, err := io.ReadAll(req.Body) if err != nil { log.Printf("Error reading body: %v", err.Error()) response := buildErrorResponse(err) writeHTTPResponse(w, response) - return + return nil } - // TODO: store all headers directly, and fetch Content-Type/Content-Encoding values when parsing encoding := req.Header.Get("Content-Encoding") if req.URL.Path == "/support/flare" || encoding == 
"" { encoding = req.Header.Get("Content-Type") @@ -305,11 +314,15 @@ func (fi *Server) handleDatadogRequest(w http.ResponseWriter, req *http.Request) log.Printf("Error caching payload: %v", err.Error()) response := buildErrorResponse(err) writeHTTPResponse(w, response) - return + return nil } - response := fi.getResponseFromURLPath(req.URL.Path) - writeHTTPResponse(w, response) + if response, ok := fi.getResponseFromURLPath(http.MethodPost, req.URL.Path); ok { + writeHTTPResponse(w, response) + return nil + } + + return fmt.Errorf("no POST response found for path %s", req.URL.Path) } func (fi *Server) handleFlushPayloads(w http.ResponseWriter, _ *http.Request) { @@ -383,8 +396,7 @@ func (fi *Server) handleFakeHealth(w http.ResponseWriter, _ *http.Request) { }) } -//nolint:revive // TODO(APL) Fix revive linter -func (fi *Server) handleGetRouteStats(w http.ResponseWriter, req *http.Request) { +func (fi *Server) handleGetRouteStats(w http.ResponseWriter, _ *http.Request) { log.Print("Handling getRouteStats request") routes := fi.store.GetRouteStats() // build response @@ -441,9 +453,19 @@ func (fi *Server) handleConfigureOverride(w http.ResponseWriter, req *http.Reque return } + if payload.Method == "" { + payload.Method = http.MethodPost + } + + if !isValidMethod(payload.Method) { + response := buildErrorResponse(fmt.Errorf("invalid request method %s", payload.Method)) + writeHTTPResponse(w, response) + return + } + log.Printf("Handling configureOverride request for endpoint %s", payload.Endpoint) - fi.safeSetResponseOverride(payload.Endpoint, httpResponse{ + fi.safeSetResponseOverride(payload.Method, payload.Endpoint, httpResponse{ statusCode: payload.StatusCode, contentType: payload.ContentType, body: payload.Body, @@ -454,22 +476,29 @@ func (fi *Server) handleConfigureOverride(w http.ResponseWriter, req *http.Reque }) } -func (fi *Server) safeSetResponseOverride(endpoint string, response httpResponse) { +func (fi *Server) safeSetResponseOverride(method string, 
endpoint string, response httpResponse) { fi.responseOverridesMutex.Lock() defer fi.responseOverridesMutex.Unlock() - - fi.responseOverrides[endpoint] = response + fi.responseOverridesByMethod[method][endpoint] = response } // getResponseFromURLPath returns the HTTP response for a given URL path, or the default response if // no override exists -func (fi *Server) getResponseFromURLPath(path string) httpResponse { +func (fi *Server) getResponseFromURLPath(method string, path string) (httpResponse, bool) { fi.responseOverridesMutex.RLock() defer fi.responseOverridesMutex.RUnlock() - if resp, ok := fi.responseOverrides[path]; ok { - return resp + // by default, update the response for POST requests + if method == "" { + method = http.MethodPost + } + + if respForMethod, ok := fi.responseOverridesByMethod[method]; ok { + if resp, ok := respForMethod[path]; ok { + return resp, true + } } - return defaultResponse + response, found := defaultResponseByMethod[method] + return response, found } diff --git a/test/fakeintake/server/server_test.go b/test/fakeintake/server/server_test.go index 7883c7bac0985..f06dec5b23492 100644 --- a/test/fakeintake/server/server_test.go +++ b/test/fakeintake/server/server_test.go @@ -12,56 +12,95 @@ import ( "bytes" _ "embed" "encoding/json" - "errors" + "fmt" "io" "net/http" - "net/http/httptest" + "strconv" "strings" "testing" "time" "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/benbjohnson/clock" - "github.com/cenkalti/backoff" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestServer(t *testing.T) { - t.Run("should accept payloads on any route", func(t *testing.T) { + + t.Run("should not run before start", func(t *testing.T) { fi := NewServer(WithClock(clock.NewMock())) + assert.False(t, fi.IsRunning()) + assert.Empty(t, fi.URL()) + }) - request, err := http.NewRequest(http.MethodPost, "/totoro", strings.NewReader("totoro|5|tag:valid,owner:pducolin")) - assert.NoError(t, err, "Error 
creating POST request") - response := httptest.NewRecorder() + t.Run("should return error when calling stop on a non-started server", func(t *testing.T) { + fi := NewServer() + err := fi.Stop() + assert.Error(t, err) + assert.Equal(t, "server not running", err.Error()) + }) - fi.handleDatadogRequest(response, request) + t.Run("should run after start", func(t *testing.T) { + fi := NewServer(WithClock(clock.NewMock())) + fi.Start() + defer fi.Stop() + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.True(collect, fi.IsRunning()) + assert.NotEmpty(collect, fi.URL()) + resp, err := http.Get(fi.URL() + "/fakeintake/health") + assert.NoError(collect, err) + if err != nil { + return + } + defer resp.Body.Close() + assert.Equal(collect, http.StatusOK, resp.StatusCode) + }, 500*time.Millisecond, 10*time.Millisecond) + }) - assert.Equal(t, http.StatusOK, response.Code, "unexpected code") + t.Run("should correctly notify when a server is ready", func(t *testing.T) { + ready := make(chan bool, 1) + fi := NewServer(WithClock(clock.NewMock()), WithReadyChannel(ready)) + fi.Start() + defer fi.Stop() + ok := <-ready + assert.True(t, ok) + assert.NotEmpty(t, fi.URL()) + resp, err := http.Get(fi.URL() + "/fakeintake/health") + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) }) - t.Run("should accept GET requests on any other route", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) + t.Run("should accept payloads on any route", func(t *testing.T) { + fi, _ := InitialiseForTests(t) + defer fi.Stop() - request, err := http.NewRequest(http.MethodGet, "/kiki", nil) - assert.NoError(t, err, "Error creating GET request") - response := httptest.NewRecorder() + response, err := http.Post(fi.URL()+"/totoro", "text/plain", strings.NewReader("totoro|5|tag:valid,owner:pducolin")) + require.NoError(t, err, "Error posting payload") + defer response.Body.Close() + assert.Equal(t, http.StatusOK, 
response.StatusCode, "unexpected code") + }) - fi.handleDatadogRequest(response, request) + t.Run("should accept GET requests on any route", func(t *testing.T) { + fi, _ := InitialiseForTests(t) + defer fi.Stop() - assert.Equal(t, http.StatusOK, response.Code, "unexpected code") + response, err := http.Get(fi.URL() + "/kiki") + require.NoError(t, err, "Error on GET request") + defer response.Body.Close() + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") }) t.Run("should accept GET requests on /fakeintake/payloads route", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/foo", nil) + fi, _ := InitialiseForTests(t) + defer fi.Stop() - assert.NoError(t, err, "Error creating GET request") - response := httptest.NewRecorder() + response, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/foo") + require.NoError(t, err, "Error on GET request") + defer response.Body.Close() - fi.handleGetPayloads(response, request) - assert.Equal(t, http.StatusOK, response.Code, "unexpected code") + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") expectedResponse := api.APIFakeIntakePayloadsRawGETResponse{ Payloads: []api.Payload{}, @@ -69,37 +108,47 @@ func TestServer(t *testing.T) { actualResponse := api.APIFakeIntakePayloadsRawGETResponse{} body, err := io.ReadAll(response.Body) assert.NoError(t, err, "Error reading response") - assert.Equal(t, "application/json", response.Header().Get("Content-Type")) + assert.Equal(t, "application/json", response.Header.Get("Content-Type")) json.Unmarshal(body, &actualResponse) assert.Equal(t, expectedResponse, actualResponse, "unexpected response") }) t.Run("should not accept GET requests on /fakeintake/payloads route without endpoint query parameter", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads", nil) 
- - assert.NoError(t, err, "Error creating GET request") - response := httptest.NewRecorder() + fi, _ := InitialiseForTests(t) + defer fi.Stop() - fi.handleGetPayloads(response, request) - assert.Equal(t, http.StatusBadRequest, response.Code, "unexpected code") - assert.Equal(t, "text/plain", response.Header().Get("Content-Type")) + response, err := http.Get(fi.URL() + "/fakeintake/payloads") + require.NoError(t, err, "Error on GET request") + defer response.Body.Close() + assert.Equal(t, http.StatusBadRequest, response.StatusCode, "unexpected code") + assert.Equal(t, "text/plain", response.Header.Get("Content-Type")) }) t.Run("should store multiple payloads on any route and return them", func(t *testing.T) { - clock := clock.NewMock() - fi := NewServer(WithClock(clock)) + fi, clock := InitialiseForTests(t) + defer fi.Stop() - postSomeFakePayloads(t, fi) + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:pducolin", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:kiki", + }, + { + Endpoint: "/kiki", + Data: "I am just a poor raw log", + }, + }) - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/totoro", nil) - assert.NoError(t, err, "Error creating GET request") - getResponse := httptest.NewRecorder() - fi.handleGetPayloads(getResponse, request) - assert.Equal(t, http.StatusOK, getResponse.Code) - assert.Equal(t, "application/json", getResponse.Header().Get("Content-Type")) + getResponse, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer getResponse.Body.Close() + assert.Equal(t, http.StatusOK, getResponse.StatusCode, "unexpected code") + assert.Equal(t, "application/json", getResponse.Header.Get("Content-Type")) actualGETResponse := api.APIFakeIntakePayloadsRawGETResponse{} body, err := io.ReadAll(getResponse.Body) assert.NoError(t, err, "Error reading GET response") @@ -109,12 
+158,12 @@ func TestServer(t *testing.T) { Payloads: []api.Payload{ { Timestamp: clock.Now().UTC(), - Encoding: "", + Encoding: "text/plain", Data: []byte("totoro|7|tag:valid,owner:pducolin"), }, { Timestamp: clock.Now().UTC(), - Encoding: "", + Encoding: "text/plain", Data: []byte("totoro|5|tag:valid,owner:kiki"), }, }, @@ -123,18 +172,16 @@ func TestServer(t *testing.T) { }) t.Run("should store multiple payloads on any route and return them in json", func(t *testing.T) { - clock := clock.NewMock() - fi := NewServer(WithClock(clock)) - - postSomeRealisticPayloads(t, fi) + fi, clock := InitialiseForTests(t) + defer fi.Stop() - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/api/v2/logs&format=json", nil) - assert.NoError(t, err, "Error creating GET request") - getResponse := httptest.NewRecorder() + PostSomeRealisticLogs(t, fi.URL()) - fi.handleGetPayloads(getResponse, request) + response, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/api/v2/logs&format=json") + require.NoError(t, err, "Error creating GET request") + defer response.Body.Close() - assert.Equal(t, http.StatusOK, getResponse.Code) + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") expectedGETResponse := api.APIFakeIntakePayloadsJsonGETResponse{ Payloads: []api.ParsedPayload{ { @@ -152,92 +199,36 @@ func TestServer(t *testing.T) { }, } actualGETResponse := api.APIFakeIntakePayloadsJsonGETResponse{} - body, err := io.ReadAll(getResponse.Body) + body, err := io.ReadAll(response.Body) assert.NoError(t, err, "Error reading GET response") json.Unmarshal(body, &actualGETResponse) assert.Equal(t, expectedGETResponse, actualGETResponse, "unexpected GET response") - }) - t.Run("should accept GET requests on /fakeintake/health route", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - request, err := http.NewRequest(http.MethodGet, "/fakeintake/health", nil) - - assert.NoError(t, err, "Error creating GET request") - response 
:= httptest.NewRecorder() - - fi.handleFakeHealth(response, request) - assert.Equal(t, http.StatusOK, response.Code, "unexpected code") - }) - - t.Run("should return error when calling stop on a non-started server", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - err := fi.Stop() - assert.Error(t, err) - assert.Equal(t, "server not running", err.Error()) - }) - - t.Run("should correctly start a server with no ready channel defined", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - fi.Start() - - err := backoff.Retry(func() error { - url := fi.URL() - if url == "" { - return errors.New("server not ready") - } - resp, err := http.Get(url + "/fakeintake/health") - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return errors.New("server not ready") - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(10*time.Millisecond), 25)) - require.NoError(t, err) - err = fi.Stop() - assert.NoError(t, err) - }) - - t.Run("should correctly notify when a server is ready", func(t *testing.T) { - ready := make(chan bool, 1) - fi := NewServer(WithClock(clock.NewMock()), WithReadyChannel(ready)) - fi.Start() - ok := <-ready - assert.True(t, ok) - assert.NotEmpty(t, fi.URL()) - err := backoff.Retry(func() error { - resp, err := http.Get(fi.URL() + "/fakeintake/health") - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return errors.New("server not ready") - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(10*time.Millisecond), 25)) - require.NoError(t, err) - err = fi.Stop() - assert.NoError(t, err) - }) t.Run("should store multiple payloads on any route and return the list of routes", func(t *testing.T) { - fi := NewServer(WithClock(clock.NewMock())) - - postSomeFakePayloads(t, fi) + fi, _ := InitialiseForTests(t) + defer fi.Stop() - request, err := http.NewRequest(http.MethodGet, 
"/fakeintake/routestats", nil) - assert.NoError(t, err, "Error creating GET request") - getResponse := httptest.NewRecorder() + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:pducolin", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:kiki", + }, + { + Endpoint: "/kiki", + Data: "I am just a poor raw log", + }, + }) - fi.handleGetRouteStats(getResponse, request) + response, err := http.Get(fi.URL() + "/fakeintake/routestats") + require.NoError(t, err, "Error on GET request") + defer response.Body.Close() - assert.Equal(t, http.StatusOK, getResponse.Code) + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") expectedGETResponse := api.APIFakeIntakeRouteStatsGETResponse{ Routes: map[string]api.RouteStat{ @@ -252,7 +243,7 @@ func TestServer(t *testing.T) { }, } actualGETResponse := api.APIFakeIntakeRouteStatsGETResponse{} - body, err := io.ReadAll(getResponse.Body) + body, err := io.ReadAll(response.Body) assert.NoError(t, err, "Error reading GET response") json.Unmarshal(body, &actualGETResponse) @@ -260,261 +251,303 @@ func TestServer(t *testing.T) { }) t.Run("should handle flush requests", func(t *testing.T) { - clock := clock.NewMock() - fi := NewServer(WithClock(clock)) - - postSomeFakePayloads(t, fi) - - request, err := http.NewRequest(http.MethodDelete, "/fakeintake/flushPayloads", nil) - assert.NoError(t, err, "Error creating flush request") - response := httptest.NewRecorder() + fi, _ := InitialiseForTests(t) + defer fi.Stop() - fi.handleFlushPayloads(response, request) - assert.Equal(t, http.StatusOK, response.Code, "unexpected code") + httpClient := http.Client{} + request, err := http.NewRequest(http.MethodDelete, fi.URL()+"/fakeintake/flushPayloads", nil) + require.NoError(t, err, "Error creating flush request") + response, err := httpClient.Do(request) + require.NoError(t, err, "Error on flush request") + defer response.Body.Close() + assert.Equal(t, 
http.StatusOK, response.StatusCode, "unexpected code") }) t.Run("should clean payloads older than 15 minutes", func(t *testing.T) { - clock := clock.NewMock() - fi := NewServer(WithClock(clock)) - fi.Start() - - postSomeFakePayloads(t, fi) + fi, clock := InitialiseForTests(t) + defer fi.Stop() - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/totoro", nil) - assert.NoError(t, err, "Error creating GET request") + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:pducolin", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:kiki", + }, + { + Endpoint: "/kiki", + Data: "I am just a poor raw log", + }, + }) clock.Add(10 * time.Minute) - response10Min := httptest.NewRecorder() - var getResponse10Min api.APIFakeIntakePayloadsRawGETResponse + response10Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer response10Min.Body.Close() - fi.handleGetPayloads(response10Min, request) + var getResponse10Min api.APIFakeIntakePayloadsRawGETResponse json.NewDecoder(response10Min.Body).Decode(&getResponse10Min) - assert.Len(t, getResponse10Min.Payloads, 2, "should contain two elements before cleanup %+v", getResponse10Min) clock.Add(10 * time.Minute) - response20Min := httptest.NewRecorder() + response20Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer response20Min.Body.Close() var getResponse20Min api.APIFakeIntakePayloadsRawGETResponse - - fi.handleGetPayloads(response20Min, request) - json.NewDecoder(response20Min.Body).Decode(&getResponse10Min) - + json.NewDecoder(response20Min.Body).Decode(&getResponse20Min) assert.Empty(t, getResponse20Min.Payloads, "should be empty after cleanup") - fi.Stop() }) t.Run("should clean payloads older than 15 minutes and keep recent payloads", func(t *testing.T) { - clock := 
clock.NewMock() - fi := NewServer(WithClock(clock)) - fi.Start() - - postSomeFakePayloads(t, fi) + fi, clock := InitialiseForTests(t) + defer fi.Stop() - request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/totoro", nil) - assert.NoError(t, err, "Error creating GET request") + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:pducolin", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:kiki", + }, + { + Endpoint: "/kiki", + Data: "I am just a poor raw log", + }, + }) clock.Add(10 * time.Minute) - postSomeFakePayloads(t, fi) + PostSomeFakePayloads(t, fi.URL(), []TestTextPayload{ + { + Endpoint: "/totoro", + Data: "totoro|7|tag:valid,owner:ponyo", + }, + { + Endpoint: "/totoro", + Data: "totoro|5|tag:valid,owner:mei", + }, + }) - response10Min := httptest.NewRecorder() + response10Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer response10Min.Body.Close() var getResponse10Min api.APIFakeIntakePayloadsRawGETResponse - - fi.handleGetPayloads(response10Min, request) json.NewDecoder(response10Min.Body).Decode(&getResponse10Min) - assert.Len(t, getResponse10Min.Payloads, 4, "should contain 4 elements before cleanup") clock.Add(10 * time.Minute) - response20Min := httptest.NewRecorder() + response20Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/totoro") + require.NoError(t, err, "Error on GET request") + defer response20Min.Body.Close() var getResponse20Min api.APIFakeIntakePayloadsRawGETResponse - - fi.handleGetPayloads(response20Min, request) json.NewDecoder(response20Min.Body).Decode(&getResponse20Min) - assert.Len(t, getResponse20Min.Payloads, 2, "should contain 2 elements after cleanup of only older elements") fi.Stop() }) - t.Run("should clean parsed payloads", func(t *testing.T) { - clock := clock.NewMock() - fi := NewServer(WithClock(clock)) - fi.Start() - - 
request, err := http.NewRequest(http.MethodGet, "/fakeintake/payloads?endpoint=/api/v2/logs&format=json", nil) - assert.NoError(t, err, "Error creating GET request") + t.Run("should clean json parsed payloads", func(t *testing.T) { + fi, clock := InitialiseForTests(t) + defer fi.Stop() - postSomeRealisticPayloads(t, fi) + PostSomeRealisticLogs(t, fi.URL()) clock.Add(10 * time.Minute) - postSomeRealisticPayloads(t, fi) + PostSomeRealisticLogs(t, fi.URL()) - response10Min := httptest.NewRecorder() + response10Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/api/v2/logs&format=json") + require.NoError(t, err, "Error on GET request") + defer response10Min.Body.Close() var getResponse10Min api.APIFakeIntakePayloadsJsonGETResponse - - fi.handleGetPayloads(response10Min, request) json.NewDecoder(response10Min.Body).Decode(&getResponse10Min) - assert.Len(t, getResponse10Min.Payloads, 2, "should contain 2 elements before cleanup") clock.Add(10 * time.Minute) - response20Min := httptest.NewRecorder() + response20Min, err := http.Get(fi.URL() + "/fakeintake/payloads?endpoint=/api/v2/logs&format=json") + require.NoError(t, err, "Error on GET request") + defer response20Min.Body.Close() var getResponse20Min api.APIFakeIntakePayloadsJsonGETResponse - - fi.handleGetPayloads(response20Min, request) json.NewDecoder(response20Min.Body).Decode(&getResponse20Min) - assert.Len(t, getResponse20Min.Payloads, 1, "should contain 1 elements after cleanup of only older elements") - - fi.Stop() }) t.Run("should respond with custom response to /support/flare", func(t *testing.T) { - fi := NewServer() - fi.Start() + fi, _ := InitialiseForTests(t) defer fi.Stop() - request, err := http.NewRequest( - http.MethodPost, "/support/flare", strings.NewReader("totoro|5|tag:valid,owner:mei")) - require.NoError(t, err, "Error creating request") - - response := httptest.NewRecorder() - fi.handleDatadogRequest(response, request) - - assert.Equal(t, http.StatusOK, response.Code) - 
assert.Equal(t, "application/json", response.Header().Get("Content-Type")) - assert.Equal(t, `{}`, response.Body.String()) + response, err := http.Head(fi.URL() + "/support/flare") + require.NoError(t, err, "Error on HEAD request") + defer response.Body.Close() + + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") + assert.Equal(t, "application/json", response.Header.Get("Content-Type")) + contentLength, err := strconv.Atoi(response.Header.Get("Content-Length")) + require.NoError(t, err, "Error parsing Content-Length header") + assert.Equal(t, 2, contentLength, "unexpected Content-Length") + data, err := io.ReadAll(response.Body) + require.NoError(t, err, "Error reading response body") + assert.Empty(t, data, "unexpected HEAD response body") }) - t.Run("should accept response overrides", func(t *testing.T) { - fi := NewServer() - fi.Start() + t.Run("should accept POST response overrides", func(t *testing.T) { + fi, _ := InitialiseForTests(t) defer fi.Stop() body := api.ResponseOverride{ + Method: http.MethodPost, Endpoint: "/totoro", StatusCode: 200, ContentType: "text/plain", Body: []byte("catbus"), } - buf := new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(body) + data := new(bytes.Buffer) + err := json.NewEncoder(data).Encode(body) require.NoError(t, err, "Error encoding request body") - request, err := http.NewRequest(http.MethodPost, "/fakeintake/configure/override", buf) + response, err := http.Post(fi.URL()+"/fakeintake/configure/override", "application/json", data) require.NoError(t, err, "Error creating POST request") + defer response.Body.Close() + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") + }) - response := httptest.NewRecorder() - fi.handleConfigureOverride(response, request) + t.Run("should accept GET response overrides", func(t *testing.T) { + fi, _ := InitialiseForTests(t) + defer fi.Stop() - expected := map[string]httpResponse{ - "/totoro": { - statusCode: http.StatusOK, - contentType: 
"text/plain", - body: []byte("catbus"), - }, - "/support/flare": { - statusCode: http.StatusOK, - contentType: "application/json", - data: flareResponseBody{CaseID: 0, Error: ""}, - body: []byte("{}"), - }, - "/api/v1/connections": { - statusCode: http.StatusOK, - contentType: "application/x-protobuf", - data: []byte{ - 0x03, 0x00, 0x17, 0x02, 0xf7, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x1a, 0x04, 0x08, 0x01, 0x10, 0x1e, - }, - body: []byte{ - 0x03, 0x00, 0x17, 0x02, 0xf7, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x1a, 0x04, 0x08, 0x01, 0x10, 0x1e, - }, - }, + body := api.ResponseOverride{ + Method: http.MethodGet, + Endpoint: "/totoro", + StatusCode: 200, + ContentType: "text/plain", + Body: []byte("catbus"), } - assert.Equal(t, expected, fi.responseOverrides) - assert.Equal(t, http.StatusOK, response.Code) + data := new(bytes.Buffer) + err := json.NewEncoder(data).Encode(body) + require.NoError(t, err, "Error encoding request body") + response, err := http.Post(fi.URL()+"/fakeintake/configure/override", "application/json", data) + require.NoError(t, err, "Error creating POST request") + defer response.Body.Close() + + assert.Equal(t, http.StatusOK, response.StatusCode, "unexpected code") }) t.Run("should respond with overridden response for matching endpoint", func(t *testing.T) { - fi := NewServer() - fi.Start() + fi, _ := InitialiseForTests(t) defer fi.Stop() - fi.responseOverrides["/totoro"] = httpResponse{ - statusCode: 200, - contentType: "text/plain", - body: []byte("catbus"), + body := api.ResponseOverride{ + Method: http.MethodGet, + Endpoint: "/totoro", + StatusCode: 200, + ContentType: "text/plain", + Body: []byte("catbus"), } + data := new(bytes.Buffer) + err := json.NewEncoder(data).Encode(body) + require.NoError(t, err, "Error encoding request body") + response, err := http.Post(fi.URL()+"/fakeintake/configure/override", "application/json", data) + require.NoError(t, err, "Error creating 
POST request") + defer response.Body.Close() + + response, err = http.Get(fi.URL() + "/totoro") + require.NoError(t, err, "Error on POST request") + defer response.Body.Close() + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Equal(t, "text/plain", response.Header.Get("Content-Type")) + responseBody, err := io.ReadAll(response.Body) + require.NoError(t, err, "Error reading response body") + assert.Equal(t, "catbus", string(responseBody)) + }) - request, err := http.NewRequest( - http.MethodPost, "/totoro", strings.NewReader("totoro|5|tag:valid,owner:mei")) - require.NoError(t, err, "Error creating request") - - response := httptest.NewRecorder() - fi.handleDatadogRequest(response, request) + t.Run("should respond with overridden response for matching endpoint", func(t *testing.T) { + fi, _ := InitialiseForTests(t) + defer fi.Stop() - assert.Equal(t, http.StatusOK, response.Code) - assert.Equal(t, "text/plain", response.Header().Get("Content-Type")) - assert.Equal(t, []byte("catbus"), response.Body.Bytes()) + body := api.ResponseOverride{ + Method: http.MethodPost, + Endpoint: "/totoro", + StatusCode: 200, + ContentType: "text/plain", + Body: []byte("catbus"), + } + data := new(bytes.Buffer) + err := json.NewEncoder(data).Encode(body) + require.NoError(t, err, "Error encoding request body") + response, err := http.Post(fi.URL()+"/fakeintake/configure/override", "application/json", data) + require.NoError(t, err, "Error creating POST request") + defer response.Body.Close() + + response, err = http.Post(fi.URL()+"/totoro", "text/plain", strings.NewReader("totoro|5|tag:valid,owner:pducolin")) + require.NoError(t, err, "Error on POST request") + defer response.Body.Close() + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Equal(t, "text/plain", response.Header.Get("Content-Type")) + responseBody, err := io.ReadAll(response.Body) + require.NoError(t, err, "Error reading response body") + assert.Equal(t, "catbus", string(responseBody)) }) 
t.Run("should respond with default response for non-matching endpoint", func(t *testing.T) { - fi := NewServer() - fi.Start() + fi, _ := InitialiseForTests(t) defer fi.Stop() - fi.responseOverrides["/totoro"] = httpResponse{ - statusCode: 200, - contentType: "text/plain", - body: []byte("catbus"), + body := api.ResponseOverride{ + Method: http.MethodPost, + Endpoint: "/totoro", + StatusCode: 200, + ContentType: "text/plain", + Body: []byte("catbus"), } + data := new(bytes.Buffer) + err := json.NewEncoder(data).Encode(body) + require.NoError(t, err, "Error encoding request body") + response, err := http.Post(fi.URL()+"/fakeintake/configure/override", "application/json", data) + require.NoError(t, err, "Error creating POST request") + defer response.Body.Close() - request, err := http.NewRequest( - http.MethodPost, "/kiki", strings.NewReader("kiki|4|tag:valid,owner:jiji")) - require.NoError(t, err, "Error creating request") - - response := httptest.NewRecorder() - fi.handleDatadogRequest(response, request) + response, err = http.Post(fi.URL()+"/kiki", "text/plain", strings.NewReader("kiki|4|tag:valid,owner:jiji")) + require.NoError(t, err, "Error on POST request") + defer response.Body.Close() - assert.Equal(t, http.StatusOK, response.Code) - assert.Equal(t, "application/json", response.Header().Get("Content-Type")) - assert.Equal(t, []byte(`{"errors":[]}`), response.Body.Bytes()) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Equal(t, "application/json", response.Header.Get("Content-Type")) + responseBody, err := io.ReadAll(response.Body) + require.NoError(t, err, "Error reading response body") + assert.Equal(t, []byte(`{"errors":[]}`), responseBody) }) } -func postSomeFakePayloads(t *testing.T, fi *Server) { - request, err := http.NewRequest(http.MethodPost, "/totoro", strings.NewReader("totoro|7|tag:valid,owner:pducolin")) - require.NoError(t, err, "Error creating POST request") - postResponse := httptest.NewRecorder() - 
fi.handleDatadogRequest(postResponse, request) - - request, err = http.NewRequest(http.MethodPost, "/totoro", strings.NewReader("totoro|5|tag:valid,owner:kiki")) - require.NoError(t, err, "Error creating POST request") - postResponse = httptest.NewRecorder() - fi.handleDatadogRequest(postResponse, request) - - request, err = http.NewRequest(http.MethodPost, "/kiki", strings.NewReader("I am just a poor raw log")) - require.NoError(t, err, "Error creating POST request") - postResponse = httptest.NewRecorder() - fi.handleDatadogRequest(postResponse, request) +type TestTextPayload struct { + Endpoint string + Data string +} + +// PostSomeFakePayloads posts some fake payloads to the given url +func PostSomeFakePayloads(t *testing.T, url string, payloads []TestTextPayload) { + t.Helper() + for _, payload := range payloads { + url := url + payload.Endpoint + response, err := http.Post(url, "text/plain", strings.NewReader(payload.Data)) + require.NoError(t, err, fmt.Sprintf("Error on POST request to url %s with data: %s", url, payload.Data)) + defer response.Body.Close() + } } //go:embed fixtures/log_bytes var logBytes []byte -func postSomeRealisticPayloads(t *testing.T, fi *Server) { - request, err := http.NewRequest(http.MethodPost, "/api/v2/logs", bytes.NewBuffer(logBytes)) - require.NoError(t, err, "Error creating POST request") - request.Header.Set("Content-Encoding", "gzip") - postResponse := httptest.NewRecorder() - fi.handleDatadogRequest(postResponse, request) +func PostSomeRealisticLogs(t *testing.T, url string) { + t.Helper() + response, err := http.Post(url+"/api/v2/logs", "gzip", bytes.NewBuffer(logBytes)) + require.NoError(t, err, "Error on POST request") + defer response.Body.Close() } diff --git a/test/fakeintake/server/testhelper.go b/test/fakeintake/server/testhelper.go new file mode 100644 index 0000000000000..8fb40ecb7ee40 --- /dev/null +++ b/test/fakeintake/server/testhelper.go @@ -0,0 +1,29 @@ +// Unless explicitly stated otherwise all files in this 
repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +package server + +import ( + "testing" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" +) + +// InitialiseForTests starts a server with a mock clock and waits for it to be ready. +// It returns the mock clock and the server. Use defer server.Stop() to stop the server +// after calling this function. +func InitialiseForTests(t *testing.T) (*Server, *clock.Mock) { + t.Helper() + ready := make(chan bool, 1) + mockClock := clock.NewMock() + fi := NewServer(WithReadyChannel(ready), WithClock(mockClock)) + fi.Start() + isReady := <-ready + require.True(t, isReady) + return fi, mockClock +} diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 3161179848dbb..bad446edf9879 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -22,7 +22,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002 + github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81 github.com/aws/aws-sdk-go-v2 v1.23.4 github.com/aws/aws-sdk-go-v2/config v1.25.10 github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 868584692c207..2c9142957433b 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -12,8 +12,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.15.0 h1:5UVON1xs6Lul4d6R5TmLDqqSJ github.com/DataDog/datadog-api-client-go/v2 v2.15.0/go.mod h1:ZG8wS+y2rUmkRDJZQq7Og7EAPFPage+7vXcmuah2I9o= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= 
github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002 h1:INEUQUD0zU5HlAMNb5BVOrAUun5gu3MjNviSIsZ+mmE= -github.com/DataDog/test-infra-definitions v0.0.0-20231213130016-281b2a324002/go.mod h1:pS50ENq41vbF+59otYFA/k2xh4Xar4+ZQSiMgF1vMLQ= +github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81 h1:9YV5ebtavZcx+BZBzKs5oIa7JjDGceD6EderNH3c5fE= +github.com/DataDog/test-infra-definitions v0.0.0-20231215154848-67d2009bcd81/go.mod h1:pS50ENq41vbF+59otYFA/k2xh4Xar4+ZQSiMgF1vMLQ= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= diff --git a/test/new-e2e/pkg/utils/e2e/stack_definition.go b/test/new-e2e/pkg/utils/e2e/stack_definition.go index 6a2a7333e14d0..c5a6655a83d06 100644 --- a/test/new-e2e/pkg/utils/e2e/stack_definition.go +++ b/test/new-e2e/pkg/utils/e2e/stack_definition.go @@ -6,9 +6,6 @@ package e2e import ( - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" @@ -20,6 +17,10 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2vm" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" + 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" ) // StackDefinition contains a Pulumi stack definition @@ -218,8 +219,7 @@ func FakeIntakeStackDef(options ...func(*AgentStackDefParam) error) *StackDefini } // fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), params.fakeintakeParams...) - fakeintakeOptions := append([]fakeintakeparams.Option{fakeintakeparams.WithoutLoadBalancer()}, params.fakeintakeParams...) - fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), fakeintakeOptions...) + fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment(), params.fakeintakeParams...) if err != nil { return nil, err diff --git a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go index e4dd94c61a6f0..4e1777f85dd95 100644 --- a/test/new-e2e/tests/agent-platform/install-script/install_script_test.go +++ b/test/new-e2e/tests/agent-platform/install-script/install_script_test.go @@ -92,7 +92,6 @@ func TestInstallScript(t *testing.T) { cwsSupported = true } } - vmOpts = append(vmOpts, ec2params.WithImageName(platformJSON[*platform][*architecture][osVers], archMapping[*architecture], testOsType)) if instanceType, ok := os.LookupEnv("E2E_OVERRIDE_INSTANCE_TYPE"); ok { vmOpts = append(vmOpts, ec2params.WithInstanceType(instanceType)) diff --git a/test/new-e2e/tests/agent-platform/platforms/platforms.json b/test/new-e2e/tests/agent-platform/platforms/platforms.json index b3158631605b5..46a4584b2495c 100644 --- a/test/new-e2e/tests/agent-platform/platforms/platforms.json +++ b/test/new-e2e/tests/agent-platform/platforms/platforms.json @@ -1,7 +1,7 @@ { "debian": { "x86_64": { - "debian-9": "ami-099d228beefd189f5", + "debian-9": "ami-0182559468c1975fe", "debian-10": "ami-041540a5c191757a0", "debian-11": "ami-09e24b0cfe072ecef", "debian-12": "ami-06db4d78cb1d3bbf9" @@ -61,7 +61,7 @@ }, "suse": { "x86_64": { 
- "sles-12": "ami-09e1f60648b4fb117", + "sles-12": "ami-08d21b039336d9351", "sles-15": "ami-08f3662e2d5b3989a" }, "arm64": { diff --git a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go new file mode 100644 index 0000000000000..938273f67e623 --- /dev/null +++ b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go @@ -0,0 +1,277 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. +package stepbystep + +import ( + "encoding/json" + "flag" + "fmt" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/params" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common" + filemanager "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/file-manager" + helpers "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/common/helper" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-platform/platforms" + e2eOs "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" + "github.com/stretchr/testify/require" + "os" + "strconv" + "strings" + "testing" +) + +var osVersion = flag.String("osversion", "", "os version to test") +var platform = flag.String("platform", "", "platform to test") +var cwsSupportedOsVersion = flag.String("cws-supported-osversion", "", "list of os where CWS is supported") +var architecture = flag.String("arch", "", "architecture to test (x86_64, arm64))") +var flavorName = flag.String("flavor", "datadog-agent", 
"package flavor to install") +var majorVersion = flag.String("major-version", "7", "major version to test (6, 7)") + +type stepByStepSuite struct { + e2e.Suite[e2e.VMEnv] + osVersion float64 + cwsSupported bool +} + +func ExecuteWithoutError(t *testing.T, client *common.TestClient, cmd string, args ...any) { + var finalCmd string + if len(args) > 0 { + finalCmd = fmt.Sprintf(cmd, args...) + } else { + finalCmd = cmd + } + _, err := client.VMClient.ExecuteWithError(finalCmd) + require.NoError(t, err) +} + +func TestStepByStepScript(t *testing.T) { + osMapping := map[string]ec2os.Type{ + "debian": ec2os.DebianOS, + "ubuntu": ec2os.UbuntuOS, + "centos": ec2os.CentOS, + "rhel": ec2os.RedHatOS, + "amazonlinux": ec2os.AmazonLinuxOS, + "redhat": ec2os.RedHatOS, + "windows": ec2os.WindowsOS, + "fedora": ec2os.FedoraOS, + "suse": ec2os.SuseOS, + } + + archMapping := map[string]e2eOs.Architecture{ + "x86_64": e2eOs.AMD64Arch, + "arm64": e2eOs.ARM64Arch, + } + + platformJSON := map[string]map[string]map[string]string{} + + err := json.Unmarshal(platforms.Content, &platformJSON) + require.NoErrorf(t, err, "failed to umarshall platform file: %v", err) + + osVersions := strings.Split(*osVersion, ",") + cwsSupportedOsVersionList := strings.Split(*cwsSupportedOsVersion, ",") + fmt.Println("Parsed platform json file: ", platformJSON) + for _, osVers := range osVersions { + vmOpts := []ec2params.Option{} + osVers := osVers + cwsSupported := false + for _, cwsSupportedOs := range cwsSupportedOsVersionList { + if cwsSupportedOs == osVers { + cwsSupported = true + } + } + + t.Run(fmt.Sprintf("test step by step on %s %s", osVers, *architecture), func(tt *testing.T) { + tt.Parallel() + fmt.Printf("Testing %s", osVers) + slice := strings.Split(osVers, "-") + var version float64 + if len(slice) == 2 { + version, err = strconv.ParseFloat(slice[1], 64) + if version == 610 { + version = 6.10 + } + require.NoError(tt, err) + } else if len(slice) == 3 { + version, err = 
strconv.ParseFloat(slice[1]+"."+slice[2], 64) + require.NoError(tt, err) + } else { + version = 0 + } + vmOpts = append(vmOpts, ec2params.WithImageName(platformJSON[*platform][*architecture][osVers], archMapping[*architecture], osMapping[*platform])) + if instanceType, ok := os.LookupEnv("E2E_OVERRIDE_INSTANCE_TYPE"); ok { + vmOpts = append(vmOpts, ec2params.WithInstanceType(instanceType)) + } + e2e.Run(tt, &stepByStepSuite{cwsSupported: cwsSupported, osVersion: version}, e2e.EC2VMStackDef(vmOpts...), params.WithStackName(fmt.Sprintf("step-by-step-test-%v-%v-%s-%s", os.Getenv("CI_PIPELINE_ID"), osVers, *architecture, *majorVersion))) + }) + } +} + +func (is *stepByStepSuite) TestStepByStep() { + fileManager := filemanager.NewUnixFileManager(is.Env().VM) + unixHelper := helpers.NewUnixHelper() + vm := is.Env().VM.(*client.PulumiStackVM) + agentClient, err := client.NewAgentClient(is.T(), vm, vm.GetOS(), false) + require.NoError(is.T(), err) + VMclient := common.NewTestClient(is.Env().VM, agentClient, fileManager, unixHelper) + + if *platform == "debian" || *platform == "ubuntu" { + is.StepByStepDebianTest(VMclient) + } else if *platform == "centos" || *platform == "amazonlinux" || *platform == "fedora" || *platform == "redhat" { + is.StepByStepRhelTest(VMclient) + } else { + require.Equal(is.T(), *platform, "suse", "NonSupportedPlatformError : %s isn't supported !", *platform) + is.StepByStepSuseTest(VMclient) + } + is.ConfigureAndRunAgentService(VMclient) + is.CheckStepByStepAgentInstallation(VMclient) + +} + +func (is *stepByStepSuite) ConfigureAndRunAgentService(VMclient *common.TestClient) { + is.T().Run("add config file", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo sh -c \"sed 's/api_key:.*/api_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/' /etc/datadog-agent/datadog.yaml.example > /etc/datadog-agent/datadog.yaml\"") + ExecuteWithoutError(t, VMclient, "sudo sh -c \"chown dd-agent:dd-agent /etc/datadog-agent/datadog.yaml && chmod 640 
/etc/datadog-agent/datadog.yaml\"") + if (*platform == "ubuntu" && is.osVersion == 14.04) || (*platform == "centos" && is.osVersion == 6.10) { + ExecuteWithoutError(t, VMclient, "sudo initctl start datadog-agent") + } else { + ExecuteWithoutError(t, VMclient, "sudo systemctl restart datadog-agent.service") + } + }) +} + +func (is *stepByStepSuite) CheckStepByStepAgentInstallation(VMclient *common.TestClient) { + common.CheckInstallation(is.T(), VMclient) + common.CheckAgentBehaviour(is.T(), VMclient) + common.CheckAgentStops(is.T(), VMclient) + common.CheckAgentRestarts(is.T(), VMclient) + common.CheckIntegrationInstall(is.T(), VMclient) + common.CheckAgentPython(is.T(), VMclient, "3") + if *majorVersion == "6" { + common.CheckAgentPython(is.T(), VMclient, "2") + } + common.CheckApmEnabled(is.T(), VMclient) + common.CheckApmDisabled(is.T(), VMclient) + if *flavorName == "datadog-agent" && is.cwsSupported { + common.CheckCWSBehaviour(is.T(), VMclient) + } + common.CheckUninstallation(is.T(), VMclient, *flavorName) +} + +func (is *stepByStepSuite) StepByStepDebianTest(VMclient *common.TestClient) { + var aptTrustedDKeyring = "/etc/apt/trusted.gpg.d/datadog-archive-keyring.gpg" + var aptUsrShareKeyring = "/usr/share/keyrings/datadog-archive-keyring.gpg" + var aptrepo = "[signed-by=/usr/share/keyrings/datadog-archive-keyring.gpg] http://apttesting.datad0g.com/" + var aptrepoDist = fmt.Sprintf("pipeline-%s-a%s-%s", os.Getenv("CI_PIPELINE_ID"), *majorVersion, *architecture) + fileManager := VMclient.FileManager + var err error + + is.T().Run("create /usr/share keyring and source list", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo apt-get update && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl gnupg") + tmpFileContent := fmt.Sprintf("deb %s %s %s", aptrepo, aptrepoDist, *majorVersion) + _, err = fileManager.WriteFile("/etc/apt/sources.list.d/datadog.list", tmpFileContent) + require.NoError(t, err) + ExecuteWithoutError(t, 
VMclient, "sudo touch %s && sudo chmod a+r %s", aptUsrShareKeyring, aptUsrShareKeyring) + keys := []string{"DATADOG_APT_KEY_CURRENT.public", "DATADOG_APT_KEY_C0962C7D.public", "DATADOG_APT_KEY_F14F620E.public", "DATADOG_APT_KEY_382E94DE.public"} + for _, key := range keys { + ExecuteWithoutError(t, VMclient, "sudo curl --retry 5 -o \"/tmp/%s\" \"https://keys.datadoghq.com/%s\"", key, key) + ExecuteWithoutError(t, VMclient, "sudo cat \"/tmp/%s\" | sudo gpg --import --batch --no-default-keyring --keyring \"%s\"", key, aptUsrShareKeyring) + } + }) + if (*platform == "ubuntu" && is.osVersion < 15) || (*platform == "debian" && is.osVersion < 9) { + is.T().Run("create /etc/apt keyring", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo cp %s %s", aptUsrShareKeyring, aptTrustedDKeyring) + }) + } + + is.T().Run("install debian", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo apt-get update") + ExecuteWithoutError(is.T(), VMclient, "sudo apt-get install %s datadog-signing-keys -y -q", *flavorName) + }) +} + +func (is *stepByStepSuite) StepByStepRhelTest(VMclient *common.TestClient) { + var arch string + if *architecture == "arm64" { + arch = "aarch64" + } else { + arch = *architecture + } + var yumrepo = fmt.Sprintf("http://yumtesting.datad0g.com/testing/pipeline-%s-a%s/%s/%s/", + os.Getenv("CI_PIPELINE_ID"), *majorVersion, *majorVersion, arch) + fileManager := VMclient.FileManager + var err error + + var protocol = "https" + if is.osVersion < 6 { + protocol = "http" + } + var repogpgcheck = "1" + if is.osVersion < 8.2 { + repogpgcheck = "0" + } + + fileContent := fmt.Sprintf("[datadog]\n"+ + "name = Datadog, Inc.\n"+ + "baseurl = %s\n"+ + "enabled=1\n"+ + "gpgcheck=1\n"+ + "repo_gpgcheck=%s\n"+ + "gpgkey=%s://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public\n"+ + "\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public\n"+ + "\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public\n"+ + 
"\t%s://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public", + yumrepo, repogpgcheck, protocol, protocol, protocol, protocol) + _, err = fileManager.WriteFile("/etc/yum.repos.d/datadog.repo", fileContent) + require.NoError(is.T(), err) + + is.T().Run("install rhel", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo yum makecache -y") + ExecuteWithoutError(t, VMclient, "sudo yum install -y %s", *flavorName) + }) +} + +func (is *stepByStepSuite) StepByStepSuseTest(VMclient *common.TestClient) { + var arch string + if *architecture == "arm64" { + arch = "aarch64" + } else { + arch = *architecture + } + + var suseRepo = fmt.Sprintf("http://yumtesting.datad0g.com/suse/testing/pipeline-%s-a%s/%s/%s/", + os.Getenv("CI_PIPELINE_ID"), *majorVersion, *majorVersion, arch) + fileManager := VMclient.FileManager + var err error + + fileContent := fmt.Sprintf("[datadog]\n"+ + "name = Datadog, Inc.\n"+ + "baseurl = %s\n"+ + "enabled=1\n"+ + "gpgcheck=1\n"+ + "repo_gpgcheck=1\n"+ + "gpgkey=https://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public\n"+ + " https://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public\n", + suseRepo) + _, err = fileManager.WriteFile("/etc/zypp/repos.d/datadog.repo", fileContent) + require.NoError(is.T(), err) + + is.T().Run("install suse", func(t *testing.T) { + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_CURRENT.public https://keys.datadoghq.com/DATADOG_RPM_KEY_CURRENT.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_CURRENT.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_B01082D3.public https://keys.datadoghq.com/DATADOG_RPM_KEY_B01082D3.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_B01082D3.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o 
/tmp/DATADOG_RPM_KEY_FD4BF915.public https://keys.datadoghq.com/DATADOG_RPM_KEY_FD4BF915.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_FD4BF915.public") + ExecuteWithoutError(t, VMclient, "sudo curl -o /tmp/DATADOG_RPM_KEY_E09422B3.public https://keys.datadoghq.com/DATADOG_RPM_KEY_E09422B3.public") + ExecuteWithoutError(t, VMclient, "sudo rpm --import /tmp/DATADOG_RPM_KEY_E09422B3.public") + ExecuteWithoutError(t, VMclient, "sudo zypper --non-interactive --no-gpg-checks refresh datadog") + ExecuteWithoutError(t, VMclient, "sudo zypper --non-interactive install %s", *flavorName) + }) +} diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go index 9ec2a45b830a2..e75848d3f3261 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_nix_test.go @@ -32,7 +32,6 @@ type linuxFlareSuite struct { } func TestLinuxFlareSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &linuxFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go index add1882fc2cf5..6b30c80223d5d 100644 --- a/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/flare/flare_win_test.go @@ -21,7 +21,6 @@ type windowsFlareSuite struct { } func TestWindowsFlareSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &windowsFlareSuite{}, e2e.FakeIntakeStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go index f2c5e5f662620..8933a30db1dec 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go +++ 
b/test/new-e2e/tests/agent-subcommands/secret/secret_nix_test.go @@ -20,7 +20,6 @@ type linuxSecretSuite struct { } func TestLinuxSecretSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &linuxSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.UbuntuOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go index d77d94ceaf527..21746c7824a1a 100644 --- a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -21,7 +21,6 @@ type windowsSecretSuite struct { } func TestWindowsSecretSuite(t *testing.T) { - t.Parallel() e2e.Run(t, &windowsSecretSuite{}, e2e.AgentStackDef(e2e.WithVMParams(ec2params.WithOS(ec2os.WindowsOS)))) } diff --git a/test/new-e2e/tests/agent-subcommands/subcommands_test.go b/test/new-e2e/tests/agent-subcommands/subcommands_test.go index 1891fa3ec5983..de3202937389d 100644 --- a/test/new-e2e/tests/agent-subcommands/subcommands_test.go +++ b/test/new-e2e/tests/agent-subcommands/subcommands_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" @@ -232,3 +233,27 @@ func (v *subcommandWithFakeIntakeSuite) TestDefaultInstallHealthy() { assert.NoError(v.T(), err) assert.Contains(v.T(), output, "Agent health: PASS") } + +func (v *subcommandWithFakeIntakeSuite) TestDefaultInstallUnhealthy() { + // the fakeintake says that any API key is invalid by sending a 403 code + override := api.ResponseOverride{ + Endpoint: "/api/v1/validate", + StatusCode: 403, + ContentType: "text/plain", + Body: []byte("invalid API key"), + } + v.Env().Fakeintake.Client.ConfigureOverride(override) + + // restart the agent, which validates 
the key using the fakeintake at startup + v.UpdateEnv(e2e.FakeIntakeStackDef( + e2e.WithAgentParams(agentparams.WithAgentConfig("log_level: info\n")), + )) + + // agent should be unhealthy because the key is invalid + _, err := v.Env().Agent.Health() + if err == nil { + assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") + return + } + assert.Contains(v.T(), err.Error(), "Agent health: FAIL") +} diff --git a/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml b/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml index 07aca8b4aaf1a..0a65323914335 100644 --- a/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml +++ b/test/new-e2e/tests/ndm/snmp/compose/snmpCompose.yaml @@ -11,5 +11,3 @@ services: agent: volumes: - ${CONFIG_DIR}/snmp.yaml:/etc/datadog-agent/conf.d/snmp.d/snmp.yaml - container_name: dd-agent - diff --git a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go b/test/new-e2e/tests/ndm/snmp/snmp_test.go similarity index 61% rename from test/new-e2e/tests/ndm/snmp/snmpTestEnv.go rename to test/new-e2e/tests/ndm/snmp/snmp_test.go index 449b3decb9dcc..c699d6755fa75 100644 --- a/test/new-e2e/tests/ndm/snmp/snmpTestEnv.go +++ b/test/new-e2e/tests/ndm/snmp/snmp_test.go @@ -7,30 +7,24 @@ package snmp import ( - "context" "embed" - "errors" "path" + "testing" + "time" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/infra" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" "github.com/DataDog/test-infra-definitions/scenarios/aws" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2params" "github.com/DataDog/test-infra-definitions/scenarios/aws/vm/ec2vm" + "github.com/stretchr/testify/assert" - 
"github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) -// TestEnv implements a test environment for NDM. Deprecated, should port to TestSuite -type TestEnv struct { - context context.Context - name string - - InstanceIP string - StackOutput auto.UpResult -} - //go:embed compose/snmpCompose.yaml var snmpCompose string @@ -41,96 +35,79 @@ const ( composeDataPath = "compose/data" ) -// NewTestEnv creates a new test environment for NDM. Deprecated, should port to NDM -func NewTestEnv() (*TestEnv, error) { - snmpTestEnv := &TestEnv{ - context: context.Background(), - name: "snmp-agent", - } - - stackManager := infra.GetStackManager() - - _, upResult, err := stackManager.GetStack(snmpTestEnv.context, snmpTestEnv.name, nil, func(ctx *pulumi.Context) error { +// snmpDockerStackDef defines a stack with a docker agent on an AmazonLinuxDockerOS VM +// with snmpsim installed and configured with snmp recordings +func snmpDockerStackDef() *e2e.StackDefinition[e2e.DockerEnv] { + return e2e.EnvFactoryStackDef(func(ctx *pulumi.Context) (*e2e.DockerEnv, error) { // setup VM - vm, err := ec2vm.NewUnixEc2VM(ctx) + vm, err := ec2vm.NewUnixEc2VM(ctx, ec2params.WithOS(ec2os.AmazonLinuxDockerOS)) if err != nil { - return err + return nil, err } fakeintakeExporter, err := aws.NewEcsFakeintake(vm.GetAwsEnvironment()) if err != nil { - return err + return nil, err } filemanager := vm.GetFileManager() // upload snmpsim data files createDataDirCommand, dataPath, err := filemanager.TempDirectory("data") if err != nil { - return err + return nil, err } dataFiles, err := loadDataFileNames() if err != nil { - return err + return nil, err } + fileCommands := []pulumi.Resource{} for _, fileName := range dataFiles { fileContent, err := dataFolder.ReadFile(path.Join(composeDataPath, fileName)) if err != nil { - return err + return nil, err } dontUseSudo := false fileCommand, err := filemanager.CopyInlineFile(pulumi.String(fileContent), path.Join(dataPath, fileName), 
dontUseSudo, pulumi.DependsOn([]pulumi.Resource{createDataDirCommand})) if err != nil { - return err + return nil, err } fileCommands = append(fileCommands, fileCommand) } createConfigDirCommand, configPath, err := filemanager.TempDirectory("config") if err != nil { - return err + return nil, err } // edit snmp config file dontUseSudo := false configCommand, err := filemanager.CopyInlineFile(pulumi.String(snmpConfig), path.Join(configPath, "snmp.yaml"), dontUseSudo, pulumi.DependsOn([]pulumi.Resource{createConfigDirCommand})) if err != nil { - return err + return nil, err } // install agent and snmpsim on docker envVars := pulumi.StringMap{"DATA_DIR": pulumi.String(dataPath), "CONFIG_DIR": pulumi.String(configPath)} composeDependencies := []pulumi.Resource{createDataDirCommand, configCommand} composeDependencies = append(composeDependencies, fileCommands...) - _, err = agent.NewDaemon( + docker, err := agent.NewDaemon( vm, dockeragentparams.WithFakeintake(fakeintakeExporter), dockeragentparams.WithExtraComposeManifest("snmpsim", snmpCompose), dockeragentparams.WithEnvironmentVariables(envVars), dockeragentparams.WithPulumiDependsOn(pulumi.DependsOn(composeDependencies)), ) - return err - }, false) - if err != nil { - return nil, err - } - - snmpTestEnv.StackOutput = upResult - - output, found := upResult.Outputs["instance-ip"] - - if !found { - return nil, errors.New("unable to find host ip") - } - snmpTestEnv.InstanceIP = output.Value.(string) - - return snmpTestEnv, nil -} - -// Destroy delete the NDM stack. 
Deprecated, should port to NDM -func (testEnv *TestEnv) Destroy() error { - return infra.GetStackManager().DeleteStack(testEnv.context, testEnv.name, nil) + if err != nil { + return nil, err + } + return &e2e.DockerEnv{ + Docker: client.NewDocker(docker), + VM: client.NewPulumiStackVM(vm), + Fakeintake: client.NewFakeintake(fakeintakeExporter), + }, nil + }) } //go:embed compose/data @@ -146,3 +123,23 @@ func loadDataFileNames() (out []string, err error) { } return out, nil } + +type snmpDockerSuite struct { + e2e.Suite[e2e.DockerEnv] +} + +// TestSnmpSuite runs the snmp e2e suite +func TestSnmpSuite(t *testing.T) { + e2e.Run(t, &snmpDockerSuite{}, snmpDockerStackDef()) +} + +// TestSnmp tests that the snmpsim container is running and that the agent container +// is sending snmp metrics to the fakeintake +func (s *snmpDockerSuite) TestSnmp() { + fakeintake := s.Env().Fakeintake + s.EventuallyWithT(func(c *assert.CollectT) { + metrics, err := fakeintake.GetMetricNames() + assert.NoError(c, err) + assert.Contains(c, metrics, "snmp.sysUpTimeInstance", "metrics %v doesn't contain snmp.sysUpTimeInstance", metrics) + }, 5*time.Minute, 10*time.Second) +} diff --git a/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml b/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..aee29a8c34785 --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/datadog-agent/datadog.yaml @@ -0,0 +1,13 @@ +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. 
+cloud_provider_metadata: [] + +dogstatsd_socket: '/tmp/dsd.socket' diff --git a/test/regression/cases/process_agent_real_time_mode/experiment.yaml b/test/regression/cases/process_agent_real_time_mode/experiment.yaml new file mode 100644 index 0000000000000..005f9511e733a --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/experiment.yaml @@ -0,0 +1,17 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml b/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml new file mode 100644 index 0000000000000..ac61bdd3b0c9f --- /dev/null +++ b/test/regression/cases/process_agent_real_time_mode/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + +blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode enabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x4, 0x8, 0x2, 0x10, 0x2] + +target_metrics: + - prometheus: + uri: "http://127.0.0.1:5000/telemetry" diff --git a/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml 
b/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..bf270d87fd2c4 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/datadog-agent/datadog.yaml @@ -0,0 +1,16 @@ +api_key: 00000000000000000000000000000000 +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. +cloud_provider_metadata: [] + +process_config: + process_collection: + enabled: true diff --git a/test/regression/cases/process_agent_standard_check/experiment.yaml b/test/regression/cases/process_agent_standard_check/experiment.yaml new file mode 100644 index 0000000000000..005f9511e733a --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/experiment.yaml @@ -0,0 +1,17 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_standard_check/lading/lading.yaml b/test/regression/cases/process_agent_standard_check/lading/lading.yaml new file mode 100644 index 0000000000000..7c2239a5b4fd8 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 
3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + +blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode disabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x2, 0x10, 0xa] + +target_metrics: + - prometheus: + uri: "http://127.0.0.1:5000/telemetry" diff --git a/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..bf270d87fd2c4 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/datadog.yaml @@ -0,0 +1,16 @@ +api_key: 00000000000000000000000000000000 +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. 
+cloud_provider_metadata: [] + +process_config: + process_collection: + enabled: true diff --git a/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/system-probe.yaml b/test/regression/cases/process_agent_standard_check_with_stats/datadog-agent/system-probe.yaml new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml b/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml new file mode 100644 index 0000000000000..d1a8ec9651ba5 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/experiment.yaml @@ -0,0 +1,18 @@ +optimization_goal: memory +erratic: false + +environment: + DD_TELEMETRY_ENABLED: true + DD_PROCESS_CONFIG_PROCESS_DD_URL: http://127.0.0.1:9092 + # For regression detection we only care about the processes generated inside the container + # so this disables checking of the processes of the host the container is running on + HOST_PROC: /tmp/procfs + DD_SYSTEM_PROBE_PROCESS_ENABLED: true + DD_API_KEY: 00000001 + +profiling_environment: + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + HOST_PROC: /tmp/procfs diff --git a/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml b/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml new file mode 100644 index 0000000000000..7c2239a5b4fd8 --- /dev/null +++ b/test/regression/cases/process_agent_standard_check_with_stats/lading/lading.yaml @@ -0,0 +1,21 @@ +generator: + - proc_fs: + seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, + 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] + root: /tmp/procfs + copy_from_host: + - /proc/uptime + - /proc/stat + - /proc/cpuinfo + total_processes: 128 + 
+blackhole: + - http: + binding_addr: "127.0.0.1:9092" + body_variant: "raw_bytes" + # process agent RT mode disabled response + raw_bytes: [0x1, 0x0, 0x17, 0x0, 0xa, 0x2, 0x20, 0x17, 0x1a, 0x2, 0x10, 0xa] + +target_metrics: + - prometheus: + uri: "http://127.0.0.1:5000/telemetry"