diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2b6ec65cf1e95c..480b3e4deb4867 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -526,6 +526,7 @@ /test/new-e2e/tests/agent-metric-logs @DataDog/agent-metrics-logs /test/new-e2e/tests/windows @DataDog/windows-agent @DataDog/windows-kernel-integrations /test/new-e2e/tests/apm @DataDog/agent-apm +/test/new-e2e/tests/remote-config @DataDog/remote-config /test/system/ @DataDog/agent-shared-components /test/system/dogstatsd/ @DataDog/agent-metrics-logs /test/benchmarks/apm_scripts/ @DataDog/agent-apm diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4faeaff8349ba9..f863248aa0d269 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,12 +1,12 @@ ### What does this PR do? @@ -44,22 +44,3 @@ * Write here in detail or link to detailed instructions on how this change can be tested/QAd/validated, including any environment setup. --> - -### Reviewer's Checklist - - -- [ ] If known, an appropriate milestone has been selected; otherwise the `Triage` milestone is set. -- [ ] Use the `major_change` label if your change either has a major impact on the code base, is impacting multiple teams or is changing important well-established internals of the Agent. This label will be use during QA to make sure each team pay extra attention to the changed behavior. For any customer facing change use a releasenote. -- [ ] A [release note](https://github.com/DataDog/datadog-agent/blob/main/docs/dev/contributing.md#reno) has been added or the `changelog/no-changelog` label has been applied. -- [ ] Changed code has automated tests for its functionality. -- [ ] Adequate QA/testing plan information is provided. Except if the `qa/skip-qa` label, with required either `qa/done` or `qa/no-code-change` labels, are applied. -- [ ] At least one `team/..` label has been applied, indicating the team(s) that should QA this change. -- [ ] If applicable, docs team has been notified or [an issue has been opened on the documentation repo](https://github.com/DataDog/documentation/issues/new). -- [ ] If applicable, the `need-change/operator` and `need-change/helm` labels have been applied. -- [ ] If applicable, the `k8s/` label, indicating the lowest Kubernetes version compatible with this feature. -- [ ] If applicable, the [config template](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml) has been updated. 
diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index b0266b765969e6..6954e047c88099 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -13,14 +13,26 @@ env: GH_REPO: ${{ github.repository }} jobs: + fetch-labels: + if: github.triggering_actor != 'dd-devflow[bot]' + runs-on: ubuntu-latest + outputs: + LABELS: ${{ steps.pr-labels.outputs.LABELS }} + steps: + - name: Get PR labels + id: pr-labels + run: | + labels=$(gh pr view ${{github.event.number}} --json labels --jq '.labels[].name' | tr '\n' ' ') + echo "Fetched labels for PR ${{github.event.number}}: $labels" + echo "LABELS=$labels" >> "$GITHUB_OUTPUT" team-label: + needs: fetch-labels if: github.triggering_actor != 'dd-devflow[bot]' runs-on: ubuntu-latest steps: - name: Check team assignment run: | - labels=$(gh pr view ${{github.event.number}} --json labels --jq '.labels[].name') - for label in $labels; do + for label in $LABELS; do if [[ "$label" =~ ^qa/ ]]; then echo "A label to skip QA is set -- no need for team assignment" exit 0 @@ -32,15 +44,29 @@ jobs: done echo "PR ${{github.event.number}} requires at least one non-triage team assignment label (label starting by 'team/')" exit 1 + env: + LABELS: ${{ needs.fetch-labels.outputs.LABELS}} skip-qa: + needs: fetch-labels if: github.triggering_actor != 'dd-devflow[bot]' runs-on: ubuntu-latest steps: - name: Check qa/[done|no-code-change] labels are not set together run: | - labels=$(gh pr view ${{github.event.number}} --json labels --jq '.labels[].name') - if [[ "$labels" =~ ^qa/done && "$labels" =~ ^qa/no-code-change ]]; then + is_qa_done=1 + is_qa_no_code_change=1 + for label in $LABELS; do + if [[ "$label" == "qa/done" ]]; then + is_qa_done=0 + fi + if [[ "$label" == "qa/no-code-change" ]]; then + is_qa_no_code_change=0 + fi + done + if [ $is_qa_done -eq 0 ] && [ $is_qa_no_code_change -eq 0 ]; then echo "Both 'qa/done' and 'qa/no-code-change' labels are set -- only one of them should be set" exit 1 fi echo "No issue with 'qa/done' and 'qa/no-code-change' labels" + env: + LABELS: ${{ needs.fetch-labels.outputs.LABELS}} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index db97388746d1df..f78b67838cf8aa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -185,7 +185,7 @@ variables: # To use images from test-infra-definitions dev branches, set the SUFFIX variable to -dev # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 9f1c04270e18 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 6ba25fcf6a61 DATADOG_AGENT_BUILDERS: v22276738-b36b132 DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded @@ -967,21 +967,28 @@ workflow: - when: manual allow_failure: true -.on_windows_installer_changes_or_manual: - - <<: *if_main_branch - - <<: *if_mergequeue - when: never - - changes: +.if_windows_installer_changes: &if_windows_installer_changes + changes: paths: - tools/windows/DatadogAgentInstaller/**/* - .gitlab/new-e2e_testing/windows.yml - test/new-e2e/tests/windows/install-test/**/* - tasks/msi.py compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + +.on_windows_installer_changes_or_manual: + - <<: *if_main_branch + - <<: *if_mergequeue + when: never + - <<: *if_windows_installer_changes when: on_success - when: manual allow_failure: true +.except_windows_installer_changes: + - <<: *if_windows_installer_changes + when: never + 
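(For context on the `label-analysis.yml` change above: PR labels are now fetched once in a dedicated `fetch-labels` job, exposed as a job output, and consumed by the `team-label` and `skip-qa` jobs through `needs` plus an `env` variable, so each check iterates over a space-separated `$LABELS` string instead of calling `gh pr view` itself. A minimal local sketch of the reworked `skip-qa` check, assuming an example label set rather than the real `needs.fetch-labels.outputs.LABELS` value:

```bash
#!/usr/bin/env bash
# Example label set; in the workflow this comes from the fetch-labels job output.
LABELS="team/agent-apm qa/done qa/no-code-change"

is_qa_done=1
is_qa_no_code_change=1
for label in $LABELS; do
  if [[ "$label" == "qa/done" ]]; then
    is_qa_done=0
  fi
  if [[ "$label" == "qa/no-code-change" ]]; then
    is_qa_no_code_change=0
  fi
done
if [ $is_qa_done -eq 0 ] && [ $is_qa_no_code_change -eq 0 ]; then
  echo "Both 'qa/done' and 'qa/no-code-change' labels are set -- only one of them should be set"
  exit 1
fi
echo "No issue with 'qa/done' and 'qa/no-code-change' labels"
```

Fetching the labels once avoids duplicate GitHub API calls and keeps both checks working from the same label snapshot.)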
.on_system_probe_or_e2e_changes_or_manual: - <<: *if_main_branch - <<: *if_mergequeue @@ -1083,6 +1090,25 @@ workflow: when: manual allow_failure: true +.on_apm_or_e2e_changes_or_manual: + - <<: *if_disable_e2e + when: never + - <<: *if_main_branch + when: on_success + - <<: *if_mergequeue + when: never + - changes: + paths: + - pkg/trace/**/* + - cmd/trace-agent/**/* + - comp/trace/**/* + - test/new-e2e/tests/apm/**/* + - test/new-e2e/go.mod + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + - when: manual + allow_failure: true + .on_trace_agent_changes_or_manual: - changes: - pkg/trace/**/* diff --git a/.gitlab/e2e.yml b/.gitlab/e2e.yml index 250bb2003144ef..00e6a1b1efe917 100644 --- a/.gitlab/e2e.yml +++ b/.gitlab/e2e.yml @@ -97,9 +97,10 @@ k8s-e2e-otlp-main: E2E_PRIVATE_KEY_PATH: /tmp/agent-qa-ssh-key E2E_KEY_PAIR_NAME: datadog-agent-ci-rsa E2E_PIPELINE_ID: $CI_PIPELINE_ID + E2E_COMMIT_SHA: $CI_COMMIT_SHORT_SHA E2E_OUTPUT_DIR: $CI_PROJECT_DIR/e2e-output script: - - inv -e new-e2e-tests.run --targets $TARGETS -c ddagent:fullImagePath=669783387624.dkr.ecr.us-east-1.amazonaws.com/agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} -c ddagent:clusterAgentFullImagePath=669783387624.dkr.ecr.us-east-1.amazonaws.com/cluster-agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} -c dddogstatsd:fullImagePath=669783387624.dkr.ecr.us-east-1.amazonaws.com/dogstatsd:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} -c ddagent:imagePullRegistry=669783387624.dkr.ecr.us-east-1.amazonaws.com -c ddagent:imagePullUsername=AWS -c ddagent:imagePullPassword=$(aws ecr get-login-password) --junit-tar "junit-${CI_JOB_NAME}.tgz" ${EXTRA_PARAMS} + - inv -e new-e2e-tests.run --targets $TARGETS -c ddagent:imagePullRegistry=669783387624.dkr.ecr.us-east-1.amazonaws.com -c ddagent:imagePullUsername=AWS -c ddagent:imagePullPassword=$(aws ecr get-login-password) --junit-tar "junit-${CI_JOB_NAME}.tgz" ${EXTRA_PARAMS} artifacts: expire_in: 2 weeks when: always @@ -217,7 +218,7 @@ new-e2e-npm-dev: rules: - !reference [.if_run_e2e_tests] - !reference [.on_dev_branch_manual] - needs: ["deploy_deb_testing-a7_x64", "deploy_windows_testing-a7"] + needs: ["deploy_deb_testing-a7_x64", "deploy_windows_testing-a7", "qa_agent"] variables: TARGETS: ./tests/npm @@ -316,28 +317,12 @@ new-e2e-orchestrator-main: allow_failure: true #TODO: Remove when https://github.com/DataDog/datadog-agent/pull/22113 is merged -new-e2e-apm-dev: +new-e2e-apm: extends: .new_e2e_template rules: - - !reference [.if_run_e2e_tests] - - !reference [.on_dev_branch_manual] + !reference [.on_apm_or_e2e_changes_or_manual] needs: - qa_agent - - qa_dca - - qa_dogstatsd - variables: - TARGETS: ./tests/apm - TEAM: apm-agent - parallel: - matrix: - - EXTRA_PARAMS: --run TestDockerFakeintakeSuiteUDS - - EXTRA_PARAMS: --run TestDockerFakeintakeSuiteTCP - - EXTRA_PARAMS: --run TestVMFakeintakeSuiteUDS - - EXTRA_PARAMS: --run TestVMFakeintakeSuiteTCP - -new-e2e-apm-main: - extends: .new_e2e_template - rules: !reference [.on_main_or_rc_and_no_skip_e2e] variables: TARGETS: ./tests/apm TEAM: apm-agent diff --git a/.gitlab/e2e_test_junit_upload.yml b/.gitlab/e2e_test_junit_upload.yml index 2e4eb1f3115a54..08cfdba6d2bca2 100644 --- a/.gitlab/e2e_test_junit_upload.yml +++ b/.gitlab/e2e_test_junit_upload.yml @@ -120,5 +120,5 @@ e2e_test_junit_upload: - new-e2e-process-main - new-e2e-cws-main - new-e2e-orchestrator-main - - new-e2e-apm-main + - new-e2e-apm - new-e2e-remote-config diff --git 
a/.gitlab/kitchen_testing/windows.yml b/.gitlab/kitchen_testing/windows.yml index 2f73047b34fbb1..93baba06556c05 100644 --- a/.gitlab/kitchen_testing/windows.yml +++ b/.gitlab/kitchen_testing/windows.yml @@ -176,21 +176,11 @@ kitchen_windows_upgrade5_agent-a7: - .kitchen_scenario_windows_a7 - .kitchen_test_upgrade5_agent -kitchen_windows_upgrade6_agent-a6: - extends: - - .kitchen_scenario_windows_a6 - - .kitchen_test_upgrade6_agent - kitchen_windows_upgrade6_agent-a7: extends: - .kitchen_scenario_windows_a7 - .kitchen_test_upgrade6_agent -kitchen_windows_upgrade7_agent-a7: - extends: - - .kitchen_scenario_windows_a7 - - .kitchen_test_upgrade7_agent - kitchen_windows_process_agent-a7: variables: KITCHEN_OSVERS: "win2022" diff --git a/.gitlab/kitchen_tests_upload.yml b/.gitlab/kitchen_tests_upload.yml index 2a95180adcd95c..e5f473c3d0affd 100644 --- a/.gitlab/kitchen_tests_upload.yml +++ b/.gitlab/kitchen_tests_upload.yml @@ -18,9 +18,7 @@ kitchen_tests_upload_common: - kitchen_windows_process_agent-a7 - kitchen_windows_upgrade5_agent-a6 - kitchen_windows_upgrade5_agent-a7 - - kitchen_windows_upgrade6_agent-a6 - kitchen_windows_upgrade6_agent-a7 - - kitchen_windows_upgrade7_agent-a7 variables: DD_ENV: ci script: diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml index cf1b9140b6423c..4fce983028905b 100644 --- a/.gitlab/maintenance_jobs/kitchen.yml +++ b/.gitlab/maintenance_jobs/kitchen.yml @@ -7,12 +7,12 @@ periodic_kitchen_cleanup_s3: tags: ["arch:amd64"] rules: !reference [.on_testing_cleanup] script: - - MAX_AGE_HOURS=24 BUCKET_NAME=$DEB_TESTING_S3_BUCKET BUCKET_PREFIX=pool python3 /deploy_scripts/cleanup_s3.py - - MAX_AGE_HOURS=24 BUCKET_NAME=$DEB_TESTING_S3_BUCKET BUCKET_PREFIX=dists python3 /deploy_scripts/cleanup_s3.py - - MAX_AGE_HOURS=24 BUCKET_NAME=$RPM_TESTING_S3_BUCKET BUCKET_PREFIX=testing/ python3 /deploy_scripts/cleanup_s3.py - - MAX_AGE_HOURS=24 BUCKET_NAME=$RPM_TESTING_S3_BUCKET BUCKET_PREFIX=suse/testing/ python3 /deploy_scripts/cleanup_s3.py - - MAX_AGE_HOURS=24 BUCKET_NAME=$WIN_S3_BUCKET BUCKET_PREFIX=pipelines/A6/ python3 /deploy_scripts/cleanup_s3.py - - MAX_AGE_HOURS=24 BUCKET_NAME=$WIN_S3_BUCKET BUCKET_PREFIX=pipelines/A7/ python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$DEB_TESTING_S3_BUCKET BUCKET_PREFIX=pool python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$DEB_TESTING_S3_BUCKET BUCKET_PREFIX=dists python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$RPM_TESTING_S3_BUCKET BUCKET_PREFIX=testing/ python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$RPM_TESTING_S3_BUCKET BUCKET_PREFIX=suse/testing/ python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$WIN_S3_BUCKET BUCKET_PREFIX=pipelines/A6/ python3 /deploy_scripts/cleanup_s3.py + - MAX_AGE_HOURS=72 BUCKET_NAME=$WIN_S3_BUCKET BUCKET_PREFIX=pipelines/A7/ python3 /deploy_scripts/cleanup_s3.py # Kills any VMs that might have been left over by kitchen # The script only deletes VMs that have been there for >= 4 hours, which is more than the time limit diff --git a/.gitlab/new-e2e_testing/windows.yml b/.gitlab/new-e2e_testing/windows.yml index b53febd2440480..7aefa516a6be5e 100644 --- a/.gitlab/new-e2e_testing/windows.yml +++ b/.gitlab/new-e2e_testing/windows.yml @@ -4,7 +4,9 @@ variables: TARGETS: ./tests/windows/install-test TEAM: windows-agent - EXTRA_PARAMS: --run 'TestMSI/Agent*/TestUpgrade$' + EXTRA_PARAMS: --run "TestMSI/Agent/$E2E_MSI_TEST$" + extends: + - .new_e2e_template 
before_script: # WINDOWS_AGENT_VERSION is used to verify the installed agent version # Must run before new_e2e_template changes the aws profile @@ -15,19 +17,29 @@ - export LAST_STABLE_VERSION=$(invoke release.get-release-json-value "last_stable::$AGENT_MAJOR_VERSION") - !reference [.new_e2e_template, script] +.new-e2e_windows_installer_tests: + parallel: + matrix: + - E2E_MSI_TEST: TestInstall + - E2E_MSI_TEST: TestRepair + - E2E_MSI_TEST: TestUpgrade + - E2E_MSI_TEST: TestUpgradeRollback + # Agent 6 .new-e2e_windows_a6_x86_64: variables: WINDOWS_AGENT_ARCH: "x86_64" + extends: + - .new-e2e_windows_msi + - .new-e2e_agent_a6 needs: ["deploy_windows_testing-a6"] +## full tests new-e2e-windows-agent-msi-windows-server-a6-x86_64: stage: kitchen_testing extends: - - .new_e2e_template - - .new-e2e_windows_msi - .new-e2e_windows_a6_x86_64 - - .new-e2e_agent_a6 + - .new-e2e_windows_installer_tests rules: - !reference [.on_deploy_a6] - !reference [.on_windows_installer_changes_or_manual] @@ -36,15 +48,33 @@ new-e2e-windows-agent-msi-windows-server-a6-x86_64: .new-e2e_windows_a7_x86_64: variables: WINDOWS_AGENT_ARCH: "x86_64" + extends: + - .new-e2e_windows_msi + - .new-e2e_agent_a7 needs: ["deploy_windows_testing-a7"] +## full tests new-e2e-windows-agent-msi-windows-server-a7-x86_64: stage: kitchen_testing extends: - - .new_e2e_template + - .new-e2e_windows_a7_x86_64 + - .new-e2e_windows_installer_tests + rules: + - !reference [.on_deploy_a7] + - !reference [.on_windows_installer_changes_or_manual] + +## single test for PRs +## skipped if the full tests are running +new-e2e-windows-agent-msi-upgrade-windows-server-a7-x86_64: + stage: kitchen_testing + extends: - .new-e2e_windows_msi - .new-e2e_windows_a7_x86_64 - - .new-e2e_agent_a7 rules: + - !reference [.except_main_or_release_branch] + - !reference [.except_windows_installer_changes] - !reference [.on_default_new-e2e_tests_a7] - - !reference [.on_windows_installer_changes_or_manual] + # must be last since it ends with when: on_success + - !reference [.except_deploy] + variables: + E2E_MSI_TEST: TestUpgradeRollback \ No newline at end of file diff --git a/.gitlab/package_build/dmg.yml b/.gitlab/package_build/dmg.yml index c1a7973b0dcf07..1f3ea8257be361 100644 --- a/.gitlab/package_build/dmg.yml +++ b/.gitlab/package_build/dmg.yml @@ -31,6 +31,7 @@ agent_dmg-x64-a7: variables: AGENT_MAJOR_VERSION: 7 PYTHON_RUNTIMES: "3" + timeout: 6h before_script: - source /root/.bashrc - export RELEASE_VERSION=$RELEASE_VERSION_7 diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index efe673b5da86ea..6e811acae52345 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -19,6 +19,7 @@ tests_macos: - inv -e github.trigger-macos-test --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" --version-cache "$VERSION_CACHE_CONTENT" after_script: - inv -e junit-macos-repack --infile junit-tests_macos.tgz --outfile junit-tests_macos-repacked.tgz + timeout: 6h artifacts: expire_in: 2 weeks when: always @@ -36,6 +37,7 @@ lint_macos: tags: ["arch:amd64"] variables: PYTHON_RUNTIMES: '3' + timeout: 6h script: - source /root/.bashrc - export GITHUB_KEY_B64=$(aws ssm get-parameter --region us-east-1 --name ci.datadog-agent.macos_github_key_b64 --with-decryption --query "Parameter.Value" --out text) diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 0c74e8b9254119..359f1b9f3a5c91 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1013,7 +1013,7 @@ 
core,github.com/googleapis/gax-go/v2/apierror,BSD-3-Clause,Copyright 2017 Google core,github.com/googleapis/gax-go/v2/apierror/internal/proto,BSD-3-Clause,Copyright 2017 Google Inc. core,github.com/googleapis/gax-go/v2/callctx,BSD-3-Clause,Copyright 2017 Google Inc. core,github.com/googleapis/gax-go/v2/internal,BSD-3-Clause,Copyright 2017 Google Inc. -core,github.com/gorilla/mux,BSD-3-Clause,Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved | Google LLC (https://opensource.google.com/) | Kamil Kisielk | Matt Silverlock | Rodrigo Moraes (https://github.com/moraes) +core,github.com/gorilla/mux,BSD-3-Clause,Copyright (c) 2023 The Gorilla Authors. All rights reserved core,github.com/gosnmp/gosnmp,BSD-2-Clause,Copyright 2012-2020 The GoSNMP Authors. All rights reserved. core,github.com/grpc-ecosystem/go-grpc-middleware,Apache-2.0,Copyright 2016 Michal Witkowski. All Rights Reserved. core,github.com/grpc-ecosystem/go-grpc-middleware/auth,Apache-2.0,Copyright 2016 Michal Witkowski. All Rights Reserved. diff --git a/cmd/agent/common/path/go.mod b/cmd/agent/common/path/go.mod index 0f3eea80d6b343..56879f9be16a80 100644 --- a/cmd/agent/common/path/go.mod +++ b/cmd/agent/common/path/go.mod @@ -7,7 +7,6 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil - ) require ( diff --git a/cmd/agent/common/remote_config.go b/cmd/agent/common/remote_config.go deleted file mode 100644 index 414803504bd9fd..00000000000000 --- a/cmd/agent/common/remote_config.go +++ /dev/null @@ -1,75 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023-present Datadog, Inc. -// TODO https://datadoghq.atlassian.net/browse/RC-1453 Remove this file once the remote config service is refactored - -package common - -import ( - "fmt" - - "github.com/DataDog/datadog-agent/pkg/config" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" - "github.com/DataDog/datadog-agent/pkg/telemetry" - "github.com/DataDog/datadog-agent/pkg/version" -) - -// NewRemoteConfigService returns a new remote configuration service -func NewRemoteConfigService(hostname string) (*remoteconfig.Service, error) { - apiKey := config.Datadog.GetString("api_key") - if config.Datadog.IsSet("remote_configuration.api_key") { - apiKey = config.Datadog.GetString("remote_configuration.api_key") - } - apiKey = configUtils.SanitizeAPIKey(apiKey) - baseRawURL := configUtils.GetMainEndpoint(config.Datadog, "https://config.", "remote_configuration.rc_dd_url") - traceAgentEnv := configUtils.GetTraceAgentDefaultEnv(config.Datadog) - configuredTags := configUtils.GetConfiguredTags(config.Datadog, false) - - telemetryReporter := newRcTelemetryReporter() - - configService, err := remoteconfig.NewService(config.Datadog, apiKey, baseRawURL, hostname, configuredTags, telemetryReporter, version.AgentVersion, remoteconfig.WithTraceAgentEnv(traceAgentEnv)) - if err != nil { - return nil, fmt.Errorf("unable to create remote-config service: %w", err) - } - - return configService, nil -} - -// ddRcTelemetryReporter is a datadog-agent telemetry counter for RC cache bypass metrics. 
It implements the RcTelemetryReporter interface. -type ddRcTelemetryReporter struct { - BypassRateLimitCounter telemetry.Counter - BypassTimeoutCounter telemetry.Counter -} - -// IncRateLimit increments the ddRcTelemetryReporter BypassRateLimitCounter counter. -func (r *ddRcTelemetryReporter) IncRateLimit() { - r.BypassRateLimitCounter.Inc() -} - -// IncTimeout increments the ddRcTelemetryReporter BypassTimeoutCounter counter. -func (r *ddRcTelemetryReporter) IncTimeout() { - r.BypassTimeoutCounter.Inc() -} - -// newRcTelemetryReporter returns a new ddRcTelemetryReporter that uses the datadog-agent telemetry package to emit metrics. -func newRcTelemetryReporter() *ddRcTelemetryReporter { - commonOpts := telemetry.Options{NoDoubleUnderscoreSep: true} - return &ddRcTelemetryReporter{ - BypassRateLimitCounter: telemetry.NewCounterWithOpts( - "remoteconfig", - "cache_bypass_ratelimiter_skip", - []string{}, - "Number of Remote Configuration cache bypass requests skipped by rate limiting.", - commonOpts, - ), - BypassTimeoutCounter: telemetry.NewCounterWithOpts( - "remoteconfig", - "cache_bypass_timeout", - []string{}, - "Number of Remote Configuration cache bypass requests that timeout.", - commonOpts, - ), - } -} diff --git a/cmd/agent/gui/gui.go b/cmd/agent/gui/gui.go index c2d628fbc6eae7..a321f2de28bb68 100644 --- a/cmd/agent/gui/gui.go +++ b/cmd/agent/gui/gui.go @@ -30,6 +30,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/flare" "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/pkg/api/security" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -103,7 +104,7 @@ func StartGUIServer(port string, flare flare.Component, statusComponent status.C } // Fetch the authentication token (persists across sessions) - authToken, e = security.FetchAuthToken() + authToken, e = security.FetchAuthToken(pkgconfig.Datadog) if e != nil { listener.Close() listener = nil diff --git a/cmd/agent/subcommands/diagnose/command.go b/cmd/agent/subcommands/diagnose/command.go index 746b4980c5692f..c089e6179e8b63 100644 --- a/cmd/agent/subcommands/diagnose/command.go +++ b/cmd/agent/subcommands/diagnose/command.go @@ -251,7 +251,7 @@ func cmdDiagnose(cliParams *cliParams, // NOTE: This and related will be moved to separate "agent telemetry" command in future func printPayload(name payloadName, _ log.Component, config config.Component) error { - if err := util.SetAuthToken(); err != nil { + if err := util.SetAuthToken(config); err != nil { fmt.Println(err) return nil } diff --git a/cmd/agent/subcommands/dogstatsd/command.go b/cmd/agent/subcommands/dogstatsd/command.go index 3b7942f4c0544e..312829be0742ab 100644 --- a/cmd/agent/subcommands/dogstatsd/command.go +++ b/cmd/agent/subcommands/dogstatsd/command.go @@ -90,7 +90,7 @@ func triggerDump(config cconfig.Component) (string, error) { port := config.GetInt("cmd_port") url := fmt.Sprintf("https://%v:%v/agent/dogstatsd-contexts-dump", addr, port) - err = util.SetAuthToken() + err = util.SetAuthToken(config) if err != nil { return "", err } diff --git a/cmd/agent/subcommands/dogstatsdcapture/command.go b/cmd/agent/subcommands/dogstatsdcapture/command.go index 508866d11ccf9a..905cd471f2a688 100644 --- a/cmd/agent/subcommands/dogstatsdcapture/command.go +++ b/cmd/agent/subcommands/dogstatsdcapture/command.go @@ -81,7 +81,7 @@ func dogstatsdCapture(log log.Component, config config.Component, cliParams *cli ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() - token, err := security.FetchAuthToken() + token, err := security.FetchAuthToken(config) if err != nil { return fmt.Errorf("unable to fetch authentication token: %w", err) } diff --git a/cmd/agent/subcommands/dogstatsdreplay/command.go b/cmd/agent/subcommands/dogstatsdreplay/command.go index 0b1236b1225423..944440854459e5 100644 --- a/cmd/agent/subcommands/dogstatsdreplay/command.go +++ b/cmd/agent/subcommands/dogstatsdreplay/command.go @@ -92,7 +92,7 @@ func dogstatsdReplay(log log.Component, config config.Component, cliParams *cliP fmt.Printf("Replaying dogstatsd traffic...\n\n") // TODO: refactor all the instantiation of the SecureAgentClient to a helper - token, err := security.FetchAuthToken() + token, err := security.FetchAuthToken(config) if err != nil { return fmt.Errorf("unable to fetch authentication token: %w", err) } diff --git a/cmd/agent/subcommands/dogstatsdstats/command.go b/cmd/agent/subcommands/dogstatsdstats/command.go index 107c958de95dee..e2a67a219dc1cc 100644 --- a/cmd/agent/subcommands/dogstatsdstats/command.go +++ b/cmd/agent/subcommands/dogstatsdstats/command.go @@ -77,7 +77,7 @@ func requestDogstatsdStats(log log.Component, config config.Component, cliParams urlstr := fmt.Sprintf("https://%v:%v/agent/dogstatsd-stats", ipcAddress, pkgconfig.Datadog.GetInt("cmd_port")) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(config) if e != nil { return e } diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index c8edfedad1d923..d9126952e3310a 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -330,7 +330,7 @@ func requestArchive(flareComp flare.Component, pdata flare.ProfileData) (string, urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfig.Datadog.GetInt("cmd_port")) // Set session token - if err = util.SetAuthToken(); err != nil { + if err = util.SetAuthToken(pkgconfig.Datadog); err != nil { fmt.Fprintln(color.Output, color.RedString(fmt.Sprintf("Error: %s", err))) return createArchive(flareComp, pdata, err) } diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go index 7dcf2a382f2dee..fee666b63e726d 100644 --- a/cmd/agent/subcommands/jmx/command.go +++ b/cmd/agent/subcommands/jmx/command.go @@ -16,8 +16,6 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" - "github.com/spf13/cobra" "go.uber.org/fx" @@ -42,11 +40,13 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" + "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" "github.com/DataDog/datadog-agent/comp/metadata/host" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks" "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/packagesigning" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/pkg/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/cli/standalone" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" @@ -141,6 +141,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(func() demultiplexer.Component { return nil }), fx.Provide(func() inventorychecks.Component { return 
nil }), fx.Provide(func() packagesigning.Component { return nil }), + fx.Provide(func() optional.Option[rcservice.Component] { return optional.NewNoneOption[rcservice.Component]() }), fx.Provide(func() status.Component { return nil }), fx.Provide(func() eventplatformreceiver.Component { return nil }), fx.Provide(tagger.NewTaggerParamsForCoreAgent), diff --git a/cmd/agent/subcommands/launchgui/command.go b/cmd/agent/subcommands/launchgui/command.go index 18d0ed695068fa..6bb00b1466f0a8 100644 --- a/cmd/agent/subcommands/launchgui/command.go +++ b/cmd/agent/subcommands/launchgui/command.go @@ -50,14 +50,14 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{launchCmd} } -func launchGui(_ config.Component, _ *cliParams) error { +func launchGui(config config.Component, _ *cliParams) error { guiPort := pkgconfig.Datadog.GetString("GUI_port") if guiPort == "-1" { return fmt.Errorf("GUI not enabled: to enable, please set an appropriate port in your datadog.yaml file") } // Read the authentication token: can only be done if user can read from datadog.yaml - authToken, err := security.FetchAuthToken() + authToken, err := security.FetchAuthToken(config) if err != nil { return err } @@ -69,7 +69,7 @@ func launchGui(_ config.Component, _ *cliParams) error { return err } urlstr := fmt.Sprintf("https://%v:%v/agent/gui/csrf-token", ipcAddress, pkgconfig.Datadog.GetInt("cmd_port")) - err = util.SetAuthToken() + err = util.SetAuthToken(config) if err != nil { return err } diff --git a/cmd/agent/subcommands/remoteconfig/command.go b/cmd/agent/subcommands/remoteconfig/command.go index 663f7eb0e43319..3185ccef9db420 100644 --- a/cmd/agent/subcommands/remoteconfig/command.go +++ b/cmd/agent/subcommands/remoteconfig/command.go @@ -62,7 +62,7 @@ func state(_ *cliParams, config config.Component) error { fmt.Println("Fetching the configuration and director repos state..") // Call GRPC endpoint returning state tree - token, err := security.FetchAuthToken() + token, err := security.FetchAuthToken(config) if err != nil { return fmt.Errorf("couldn't get auth token: %w", err) } diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index d2c066a4102050..9327647bc5db7c 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -84,6 +84,8 @@ import ( otelcollector "github.com/DataDog/datadog-agent/comp/otelcol/collector" processagentStatusImpl "github.com/DataDog/datadog-agent/comp/process/status/statusimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice/rcserviceimpl" + "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" "github.com/DataDog/datadog-agent/comp/snmptraps" snmptrapsServer "github.com/DataDog/datadog-agent/comp/snmptraps/server" traceagentStatusImpl "github.com/DataDog/datadog-agent/comp/trace/status/statusimpl" @@ -98,7 +100,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/commonchecks" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/data" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/diagnose" adScheduler "github.com/DataDog/datadog-agent/pkg/logs/schedulers/ad" pkgMetadata "github.com/DataDog/datadog-agent/pkg/metadata" @@ -332,6 +333,8 @@ func getSharedFxOption() fx.Option { demultiplexerimpl.Module(), dogstatsd.Bundle(), 
otelcol.Bundle(), + rctelemetryreporterimpl.Module(), + rcserviceimpl.Module(), rcclient.Module(), fx.Provide(tagger.NewTaggerParamsForCoreAgent), tagger.Module(), @@ -472,7 +475,7 @@ func startAgent( ctx, _ := pkgcommon.GetMainCtxCancel() healthPort := pkgconfig.Datadog.GetInt("health_port") if healthPort > 0 { - err := healthprobe.Serve(ctx, healthPort) + err := healthprobe.Serve(ctx, pkgconfig.Datadog, healthPort) if err != nil { return log.Errorf("Error starting health port, exiting: %v", err) } @@ -499,15 +502,7 @@ func startAgent( log.Infof("Hostname is: %s", hostnameDetected) // start remote configuration management - var configService *remoteconfig.Service if pkgconfig.IsRemoteConfigEnabled(pkgconfig.Datadog) { - configService, err = common.NewRemoteConfigService(hostnameDetected) - if err != nil { - log.Errorf("Failed to initialize config management service: %s", err) - } else { - configService.Start(context.Background()) - } - if err := rcclient.Start("core-agent"); err != nil { pkglog.Errorf("Failed to start the RC client component: %s", err) } else { @@ -541,7 +536,6 @@ func startAgent( // start the cmd HTTP server if err = agentAPI.StartServer( - configService, wmeta, taggerComp, logsAgent, diff --git a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go index ae4393643f0183..9ff892d3b89d76 100644 --- a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go +++ b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go @@ -55,7 +55,7 @@ func StartCLCRunnerServer(extraHandlers map[string]http.Handler) error { // CLC Runner token // Use the Cluster Agent token - err = util.InitDCAAuthToken() + err = util.InitDCAAuthToken(config.Datadog) if err != nil { return err } diff --git a/cmd/agent/subcommands/secret/command.go b/cmd/agent/subcommands/secret/command.go index 5193f7e2420f14..94918f9284c931 100644 --- a/cmd/agent/subcommands/secret/command.go +++ b/cmd/agent/subcommands/secret/command.go @@ -79,7 +79,7 @@ func secretRefresh(config config.Component) error { } func callAPIEndpoint(apiEndpointPath string, config config.Component) ([]byte, error) { - if err := util.SetAuthToken(); err != nil { + if err := util.SetAuthToken(config); err != nil { fmt.Println(err) return nil, err } diff --git a/cmd/agent/subcommands/status/command.go b/cmd/agent/subcommands/status/command.go index c80d6e34a4f7c2..6a1a8d5bf3f5ff 100644 --- a/cmd/agent/subcommands/status/command.go +++ b/cmd/agent/subcommands/status/command.go @@ -228,7 +228,7 @@ func makeRequest(url string) ([]byte, error) { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(pkgconfig.Datadog) if e != nil { return nil, e } diff --git a/cmd/agent/subcommands/stop/command.go b/cmd/agent/subcommands/stop/command.go index 8845d2ea1db433..b247c3c8038675 100644 --- a/cmd/agent/subcommands/stop/command.go +++ b/cmd/agent/subcommands/stop/command.go @@ -54,7 +54,7 @@ func stop(config config.Component, _ *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - e := util.SetAuthToken() + e := util.SetAuthToken(config) if e != nil { return e } diff --git a/cmd/agent/subcommands/streamep/command.go b/cmd/agent/subcommands/streamep/command.go index a96c6a0271713e..ff88810256a4e4 100644 --- a/cmd/agent/subcommands/streamep/command.go +++ 
b/cmd/agent/subcommands/streamep/command.go @@ -78,7 +78,7 @@ func streamRequest(url string, body []byte, onChunk func([]byte)) error { c := util.GetClient(false) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(pkgconfig.Datadog) if e != nil { return e } diff --git a/cmd/agent/subcommands/streamlogs/command.go b/cmd/agent/subcommands/streamlogs/command.go index c60d89017d4b39..f4d1f960c698b4 100644 --- a/cmd/agent/subcommands/streamlogs/command.go +++ b/cmd/agent/subcommands/streamlogs/command.go @@ -82,7 +82,7 @@ func streamRequest(url string, body []byte, onChunk func([]byte)) error { c := util.GetClient(false) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(pkgconfig.Datadog) if e != nil { return e } diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index c3a5367416daec..5c1550241860bd 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -111,7 +111,7 @@ func run(log log.Component, taggerComp tagger.Component, demultiplexer demultipl // Setup healthcheck port var healthPort = pkgconfig.Datadog.GetInt("health_port") if healthPort > 0 { - err := healthprobe.Serve(mainCtx, healthPort) + err := healthprobe.Serve(mainCtx, pkgconfig.Datadog, healthPort) if err != nil { return pkglog.Errorf("Error starting health port, exiting: %v", err) } diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index 987ac9d01ac056..40fe82ce81ab80 100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -74,10 +74,10 @@ func StartServer(w workloadmeta.Component, taggerComp tagger.Component, senderMa return fmt.Errorf("unable to create the api server: %v", err) } // Internal token - util.CreateAndSetAuthToken() //nolint:errcheck + util.CreateAndSetAuthToken(config.Datadog) //nolint:errcheck // DCA client token - util.InitDCAAuthToken() //nolint:errcheck + util.InitDCAAuthToken(config.Datadog) //nolint:errcheck // create cert hosts := []string{"127.0.0.1", "localhost"} diff --git a/cmd/cluster-agent/api/server_test.go b/cmd/cluster-agent/api/server_test.go index a24a59d3efe556..2850a22ed3d54c 100644 --- a/cmd/cluster-agent/api/server_test.go +++ b/cmd/cluster-agent/api/server_test.go @@ -21,7 +21,7 @@ import ( func TestValidateTokenMiddleware(t *testing.T) { mockConfig := config.Mock(t) mockConfig.SetWithoutSource("cluster_agent.auth_token", "abc123") - util.InitDCAAuthToken() + util.InitDCAAuthToken(config.Datadog) tests := []struct { path, authToken string diff --git a/cmd/cluster-agent/subcommands/metamap/command.go b/cmd/cluster-agent/subcommands/metamap/command.go index 7cfdd01b5ba5ef..e3e11271d67ef4 100644 --- a/cmd/cluster-agent/subcommands/metamap/command.go +++ b/cmd/cluster-agent/subcommands/metamap/command.go @@ -78,7 +78,7 @@ func getMetadataMap(nodeName string) error { } // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(pkgconfig.Datadog) if e != nil { return e } diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index b07f7aa2f5f260..383eeed5d2de76 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -11,6 +11,7 @@ package start import ( "context" "fmt" + "github.com/DataDog/datadog-agent/pkg/util/optional" "net/http" "os" "os/signal" @@ -43,6 +44,9 @@ import ( 
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver/eventplatformreceiverimpl" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" + rccomp "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice/rcserviceimpl" + "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" "github.com/DataDog/datadog-agent/pkg/api/healthprobe" "github.com/DataDog/datadog-agent/pkg/clusteragent" admissionpkg "github.com/DataDog/datadog-agent/pkg/clusteragent/admission" @@ -54,7 +58,6 @@ import ( pkgconfig "github.com/DataDog/datadog-agent/pkg/config" rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/config/remote/data" - rcservice "github.com/DataDog/datadog-agent/pkg/config/remote/service" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util" @@ -65,7 +68,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/pkg/clusteragent/languagedetection" @@ -135,6 +137,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(tagger.NewTaggerParams), tagger.Module(), collectorimpl.Module(), + rcserviceimpl.Module(), + rctelemetryreporterimpl.Module(), ) }, } @@ -142,7 +146,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{startCmd} } -func start(log log.Component, config config.Component, taggerComp tagger.Component, telemetry telemetry.Component, demultiplexer demultiplexer.Component, wmeta workloadmeta.Component, secretResolver secrets.Component, collector collector.Component) error { +func start(log log.Component, config config.Component, taggerComp tagger.Component, telemetry telemetry.Component, demultiplexer demultiplexer.Component, wmeta workloadmeta.Component, secretResolver secrets.Component, collector collector.Component, rcService optional.Option[rccomp.Component]) error { stopCh := make(chan struct{}) mainCtx, mainCtxCancel := context.WithCancel(context.Background()) @@ -188,7 +192,7 @@ func start(log log.Component, config config.Component, taggerComp tagger.Compone // Setup healthcheck port healthPort := pkgconfig.Datadog.GetInt("health_port") if healthPort > 0 { - err := healthprobe.Serve(mainCtx, healthPort) + err := healthprobe.Serve(mainCtx, config, healthPort) if err != nil { return fmt.Errorf("Error starting health port, exiting: %v", err) } @@ -196,19 +200,17 @@ func start(log log.Component, config config.Component, taggerComp tagger.Compone pkglog.Debugf("Health check listening on port %d", healthPort) } - // Initialize remote configuration + // Initialize and start remote configuration client var rcClient *rcclient.Client - var rcService *rcservice.Service - if pkgconfig.IsRemoteConfigEnabled(pkgconfig.Datadog) { + rcserv, isSet := rcService.Get() + if pkgconfig.IsRemoteConfigEnabled(config) && isSet { var err error - rcClient, rcService, err = initializeRemoteConfig(mainCtx) + rcClient, err = 
initializeRemoteConfigClient(mainCtx, rcserv) if err != nil { log.Errorf("Failed to start remote-configuration: %v", err) } else { - rcService.Start(mainCtx) rcClient.Start() defer func() { - _ = rcService.Stop() rcClient.Close() }() } @@ -484,23 +486,18 @@ func setupClusterCheck(ctx context.Context) (*clusterchecks.Handler, error) { return handler, nil } -func initializeRemoteConfig(ctx context.Context) (*rcclient.Client, *rcservice.Service, error) { +func initializeRemoteConfigClient(ctx context.Context, rcService rccomp.Component) (*rcclient.Client, error) { clusterName := "" hname, err := hostname.Get(ctx) if err != nil { - pkglog.Warnf("Error while getting hostname, needed for retrieving cluster-name: cluster-name won't be set for remote-config") + pkglog.Warnf("Error while getting hostname, needed for retrieving cluster-name: cluster-name won't be set for remote-config client") } else { clusterName = clustername.GetClusterName(context.TODO(), hname) } clusterID, err := clustername.GetClusterID() if err != nil { - pkglog.Warnf("Error retrieving cluster ID: cluster-id won't be set for remote-config") - } - - rcService, err := common.NewRemoteConfigService(hname) - if err != nil { - return nil, nil, err + pkglog.Warnf("Error retrieving cluster ID: cluster-id won't be set for remote-config client") } rcClient, err := rcclient.NewClient(rcService, @@ -511,10 +508,10 @@ func initializeRemoteConfig(ctx context.Context) (*rcclient.Client, *rcservice.S rcclient.WithDirectorRootOverride(pkgconfig.Datadog.GetString("remote_configuration.director_root")), ) if err != nil { - return nil, nil, fmt.Errorf("unable to create local remote-config client: %w", err) + return nil, fmt.Errorf("unable to create local remote-config client: %w", err) } - return rcClient, rcService, nil + return rcClient, nil } func registerChecks() { diff --git a/cmd/cluster-agent/subcommands/status/command.go b/cmd/cluster-agent/subcommands/status/command.go index 2742221cbd82ad..270f8f61677d72 100644 --- a/cmd/cluster-agent/subcommands/status/command.go +++ b/cmd/cluster-agent/subcommands/status/command.go @@ -72,7 +72,7 @@ func run(log log.Component, config config.Component, cliParams *cliParams) error urlstr := fmt.Sprintf("https://localhost:%v/status", pkgconfig.Datadog.GetInt("cluster_agent.cmd_port")) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(config) if e != nil { return e } diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index 3d9ca37bdbd0ad..08a4729822e06f 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -264,7 +264,7 @@ func RunDogstatsd(ctx context.Context, cliParams *CLIParams, config config.Compo // Setup healthcheck port var healthPort = config.GetInt("health_port") if healthPort > 0 { - err = healthprobe.Serve(ctx, healthPort) + err = healthprobe.Serve(ctx, config, healthPort) if err != nil { err = log.Errorf("Error starting health port, exiting: %v", err) return diff --git a/cmd/process-agent/subcommands/workloadlist/command.go b/cmd/process-agent/subcommands/workloadlist/command.go index aa7199aa4a19f2..d9f2d4d71a4af7 100644 --- a/cmd/process-agent/subcommands/workloadlist/command.go +++ b/cmd/process-agent/subcommands/workloadlist/command.go @@ -51,11 +51,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{workloadListCommand} } -func workloadList(_ log.Component, _ config.Component, cliParams *cliParams) error { +func 
workloadList(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } diff --git a/cmd/security-agent/api/server.go b/cmd/security-agent/api/server.go index b90a5d9d0f4dcd..3cbfe2675ee4fd 100644 --- a/cmd/security-agent/api/server.go +++ b/cmd/security-agent/api/server.go @@ -60,7 +60,7 @@ func (s *Server) Start() error { // Validate token for every request r.Use(validateToken) - err := util.CreateAndSetAuthToken() + err := util.CreateAndSetAuthToken(config.Datadog) if err != nil { return err } diff --git a/cmd/security-agent/subcommands/config/config.go b/cmd/security-agent/subcommands/config/config.go index d925349b6add0a..b4a08f8dc46c24 100644 --- a/cmd/security-agent/subcommands/config/config.go +++ b/cmd/security-agent/subcommands/config/config.go @@ -129,7 +129,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{cmd} } func getSettingsClient(_ *cobra.Command, _ []string) (settings.Client, error) { - err := util.SetAuthToken() + err := util.SetAuthToken(pkgconfig.Datadog) if err != nil { return nil, err } diff --git a/cmd/security-agent/subcommands/flare/command.go b/cmd/security-agent/subcommands/flare/command.go index 1eac71b8fdb123..fd993b59a8d411 100644 --- a/cmd/security-agent/subcommands/flare/command.go +++ b/cmd/security-agent/subcommands/flare/command.go @@ -92,7 +92,7 @@ func requestFlare(_ log.Component, config config.Component, _ secrets.Component, logFile := config.GetString("security_agent.log_file") // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(config) if e != nil { return e } diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index f37ed57f037194..c62dd34576d240 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -397,8 +397,8 @@ func checkPoliciesLoaded(client secagent.SecurityModuleClientWrapper, writer io. 
return nil } -func newDefaultEvent() eval.Event { - return model.NewDefaultEvent() +func newFakeEvent() eval.Event { + return model.NewFakeEvent() } func checkPoliciesLocal(args *checkPoliciesCliParams, writer io.Writer) error { @@ -437,7 +437,7 @@ func checkPoliciesLocal(args *checkPoliciesCliParams, writer io.Writer) error { loader := rules.NewPolicyLoader(provider) - ruleSet := rules.NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + ruleSet := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) evaluationSet, err := rules.NewEvaluationSet([]*rules.RuleSet{ruleSet}) if err != nil { return err @@ -550,7 +550,7 @@ func evalRule(_ log.Component, _ config.Component, _ secrets.Component, evalArgs loader := rules.NewPolicyLoader(provider) - ruleSet := rules.NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + ruleSet := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) evaluationSet, err := rules.NewEvaluationSet([]*rules.RuleSet{ruleSet}) if err != nil { return err diff --git a/cmd/security-agent/subcommands/status/command.go b/cmd/security-agent/subcommands/status/command.go index 1587728887102b..c94243473a5caf 100644 --- a/cmd/security-agent/subcommands/status/command.go +++ b/cmd/security-agent/subcommands/status/command.go @@ -72,7 +72,7 @@ func runStatus(_ log.Component, config config.Component, _ secrets.Component, pa urlstr := fmt.Sprintf("https://localhost:%v/agent/status", config.GetInt("security_agent.cmd_port")) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(config) if e != nil { return e } diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index 9e76774321a747..6f16c87cd0a8d0 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -310,7 +310,7 @@ func startSystemProbe(cliParams *cliParams, log log.Component, statsd compstatsd // Setup healthcheck port healthPort := cfg.HealthPort if healthPort > 0 { - err := healthprobe.Serve(ctx, healthPort) + err := healthprobe.Serve(ctx, ddconfig.Datadog, healthPort) if err != nil { return log.Errorf("error starting health check server, exiting: %s", err) } diff --git a/cmd/updater/command/command.go b/cmd/updater/command/command.go index 50aafd24e854f6..6135428da95504 100644 --- a/cmd/updater/command/command.go +++ b/cmd/updater/command/command.go @@ -10,6 +10,7 @@ import ( "fmt" "os" + "github.com/DataDog/datadog-agent/pkg/config" "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -30,6 +31,9 @@ type GlobalParams struct { // file, to allow overrides from the command line ConfFilePath string + // LogFilePath is the path to the log file. + LogFilePath string + // Package is the package managed by this instance of the updater. Package string @@ -42,7 +46,9 @@ type SubcommandFactory func(globalParams *GlobalParams) []*cobra.Command // MakeCommand makes the top-level Cobra command for this app. 
func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { - globalParams := GlobalParams{} + globalParams := GlobalParams{ + ConfFilePath: config.DefaultUpdaterLogFile, + } // AgentCmd is the root command agentCmd := &cobra.Command{ @@ -54,7 +60,7 @@ Datadog Updater updates your agents based on requests received from the Datadog } agentCmd.PersistentFlags().StringVarP(&globalParams.ConfFilePath, "cfgpath", "c", "", "path to directory containing updater.yaml") - agentCmd.PersistentFlags().StringVarP(&globalParams.Package, "package", "p", "", "package to update") + agentCmd.PersistentFlags().StringVarP(&globalParams.Package, "package", "P", "", "package to update") agentCmd.PersistentFlags().StringVarP(&globalParams.RepositoriesDir, "repositories", "d", "/opt/datadog-packages", "path to directory containing repositories") _ = agentCmd.MarkFlagRequired("package") diff --git a/comp/README.md b/comp/README.md index a641daab8f5b69..af8db9ca35b8ff 100644 --- a/comp/README.md +++ b/comp/README.md @@ -374,6 +374,14 @@ Package remoteconfig defines the fx options for the Bundle +### [comp/remote-config/rcservice](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/remote-config/rcservice) + +Package rcservice is a remote config service that can run within the agent to receive remote config updates from the DD backend. + +### [comp/remote-config/rctelemetryreporter](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter) + +Package rctelemetryreporter provides a component that sends RC-specific metrics to the DD backend. + ## [comp/snmptraps](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/snmptraps) (Component Bundle) *Datadog Team*: network-device-monitoring diff --git a/comp/api/api/apiimpl/api.go b/comp/api/api/apiimpl/api.go index fa04db36173dc3..4d476208ca9e0b 100644 --- a/comp/api/api/apiimpl/api.go +++ b/comp/api/api/apiimpl/api.go @@ -7,6 +7,7 @@ package apiimpl import ( + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "net" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" @@ -30,7 +31,6 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost" "github.com/DataDog/datadog-agent/comp/metadata/packagesigning" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -55,6 +55,7 @@ type apiServer struct { pkgSigning packagesigning.Component statusComponent status.Component eventPlatformReceiver eventplatformreceiver.Component + rcService optional.Option[rcservice.Component] } type dependencies struct { @@ -73,6 +74,7 @@ type dependencies struct { PkgSigning packagesigning.Component StatusComponent status.Component EventPlatformReceiver eventplatformreceiver.Component + RcService optional.Option[rcservice.Component] } var _ api.Component = (*apiServer)(nil) @@ -92,18 +94,18 @@ func newAPIServer(deps dependencies) api.Component { pkgSigning: deps.PkgSigning, statusComponent: deps.StatusComponent, eventPlatformReceiver: deps.EventPlatformReceiver, + rcService: deps.RcService, } } // StartServer creates the router and starts the HTTP server func (server *apiServer) StartServer( - configService *remoteconfig.Service, wmeta workloadmeta.Component, taggerComp tagger.Component, logsAgent optional.Option[logsAgent.Component], senderManager sender.DiagnoseSenderManager, ) error { - return 
StartServers(configService, + return StartServers(server.rcService, server.flare, server.dogstatsdServer, server.capture, diff --git a/comp/api/api/apiimpl/api_mock.go b/comp/api/api/apiimpl/api_mock.go index 2d73b0149656b6..30dd01fcd7cfde 100644 --- a/comp/api/api/apiimpl/api_mock.go +++ b/comp/api/api/apiimpl/api_mock.go @@ -17,7 +17,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -39,7 +38,6 @@ func newMock() api.Mock { // StartServer creates the router and starts the HTTP server func (mock *mockAPIServer) StartServer( - _ *remoteconfig.Service, _ workloadmeta.Component, _ tagger.Component, _ optional.Option[logsAgent.Component], diff --git a/comp/api/api/apiimpl/grpc.go b/comp/api/api/apiimpl/grpc.go index 3610751084b129..353884e3216f81 100644 --- a/comp/api/api/apiimpl/grpc.go +++ b/comp/api/api/apiimpl/grpc.go @@ -8,6 +8,8 @@ package apiimpl import ( "context" "fmt" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" + "github.com/DataDog/datadog-agent/pkg/util/optional" "time" workloadmetaServer "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" @@ -21,7 +23,6 @@ import ( taggerserver "github.com/DataDog/datadog-agent/comp/core/tagger/server" dsdReplay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -36,7 +37,7 @@ type serverSecure struct { pb.UnimplementedAgentSecureServer taggerServer *taggerserver.Server workloadmetaServer *workloadmetaServer.Server - configService *remoteconfig.Service + configService optional.Option[rcservice.Component] dogstatsdServer dogstatsdServer.Component capture dsdReplay.Component } @@ -114,19 +115,21 @@ func (s *serverSecure) DogstatsdSetTaggerState(_ context.Context, req *pb.Tagger var rcNotInitializedErr = status.Error(codes.Unimplemented, "remote configuration service not initialized") func (s *serverSecure) ClientGetConfigs(ctx context.Context, in *pb.ClientGetConfigsRequest) (*pb.ClientGetConfigsResponse, error) { - if s.configService == nil { + rcService, isSet := s.configService.Get() + if !isSet || rcService == nil { log.Debug(rcNotInitializedErr.Error()) return nil, rcNotInitializedErr } - return s.configService.ClientGetConfigs(ctx, in) + return rcService.ClientGetConfigs(ctx, in) } func (s *serverSecure) GetConfigState(_ context.Context, _ *emptypb.Empty) (*pb.GetStateConfigResponse, error) { - if s.configService == nil { + rcService, isSet := s.configService.Get() + if !isSet || rcService == nil { log.Debug(rcNotInitializedErr.Error()) return nil, rcNotInitializedErr } - return s.configService.ConfigGetState() + return rcService.ConfigGetState() } // WorkloadmetaStreamEntities streams entities from the workloadmeta store applying the given filter diff --git a/comp/api/api/apiimpl/server.go b/comp/api/api/apiimpl/server.go index 3b8033347205fb..ddc654fca70e3f 100644 --- a/comp/api/api/apiimpl/server.go +++ b/comp/api/api/apiimpl/server.go @@ -8,6 +8,7 @@ package apiimpl import ( 
"crypto/tls" "fmt" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" stdLog "log" "net" "net/http" @@ -34,7 +35,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/config" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -64,7 +64,7 @@ func stopServer(listener net.Listener, name string) { // StartServers creates certificates and starts API servers func StartServers( - configService *remoteconfig.Service, + configService optional.Option[rcservice.Component], flare flare.Component, dogstatsdServer dogstatsdServer.Component, capture replay.Component, @@ -106,7 +106,7 @@ func StartServers( MinVersion: tls.VersionTLS12, } - if err := util.CreateAndSetAuthToken(); err != nil { + if err := util.CreateAndSetAuthToken(config.Datadog); err != nil { return err } diff --git a/comp/api/api/apiimpl/server_cmd.go b/comp/api/api/apiimpl/server_cmd.go index 1a97816f51d321..d20398a5a3969b 100644 --- a/comp/api/api/apiimpl/server_cmd.go +++ b/comp/api/api/apiimpl/server_cmd.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "net" "net/http" "time" @@ -44,7 +45,6 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/packagesigning" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/config" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -58,7 +58,7 @@ func startCMDServer( cmdAddr string, tlsConfig *tls.Config, tlsCertPool *x509.CertPool, - configService *remoteconfig.Service, + configService optional.Option[rcservice.Component], flare flare.Component, dogstatsdServer dogstatsdServer.Component, capture replay.Component, diff --git a/comp/api/api/component.go b/comp/api/api/component.go index 8e1f5cb7c01030..4506bad7a92575 100644 --- a/comp/api/api/component.go +++ b/comp/api/api/component.go @@ -13,7 +13,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -27,7 +26,6 @@ import ( // Component is the component type. 
type Component interface { StartServer( - configService *remoteconfig.Service, wmeta workloadmeta.Component, tagger tagger.Component, logsAgent optional.Option[logsAgent.Component], diff --git a/comp/core/log/go.mod b/comp/core/log/go.mod index 91590b046039de..cad2c3fa108ef3 100644 --- a/comp/core/log/go.mod +++ b/comp/core/log/go.mod @@ -93,7 +93,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.1.2 // indirect github.com/spf13/cast v1.5.1 // indirect diff --git a/comp/core/log/go.sum b/comp/core/log/go.sum index 50a0fd896a9698..31ba655d96678f 100644 --- a/comp/core/log/go.sum +++ b/comp/core/log/go.sum @@ -225,8 +225,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -414,7 +414,6 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/comp/core/tagger/remote/tagger.go b/comp/core/tagger/remote/tagger.go index 61507bac364459..a02c081c8c9f24 100644 --- a/comp/core/tagger/remote/tagger.go +++ b/comp/core/tagger/remote/tagger.go @@ -73,7 +73,7 @@ type Options struct { func NodeAgentOptions(config configComponent.Component) (Options, error) { return Options{ Target: fmt.Sprintf(":%v", config.GetInt("cmd_port")), - TokenFetcher: security.FetchAuthToken, + TokenFetcher: func() (string, error) { return security.FetchAuthToken(config) }, }, nil } @@ -83,7 +83,7 @@ func NodeAgentOptions(config configComponent.Component) (Options, error) { func NodeAgentOptionsForSecruityResolvers() (Options, error) { return Options{ Target: fmt.Sprintf(":%v", config.Datadog.GetInt("cmd_port")), - TokenFetcher: security.FetchAuthToken, + TokenFetcher: func() (string, error) { return security.FetchAuthToken(config.Datadog) 
}, }, nil } @@ -101,7 +101,7 @@ func CLCRunnerOptions(config configComponent.Component) (Options, error) { // gRPC targets do not have a protocol. the DCA endpoint is always HTTPS, // so a simple `TrimPrefix` is enough. opts.Target = strings.TrimPrefix(target, "https://") - opts.TokenFetcher = security.GetClusterAgentAuthToken + opts.TokenFetcher = func() (string, error) { return security.GetClusterAgentAuthToken(config) } } return opts, nil diff --git a/comp/core/workloadmeta/collectors/internal/remote/generic.go b/comp/core/workloadmeta/collectors/internal/remote/generic.go index 6a1ca90997ddaf..1665b0329f283e 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/generic.go +++ b/comp/core/workloadmeta/collectors/internal/remote/generic.go @@ -25,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/telemetry" "github.com/DataDog/datadog-agent/pkg/api/security" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -147,7 +148,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err default: } - token, err := security.FetchAuthToken() + token, err := security.FetchAuthToken(pkgconfig.Datadog) if err != nil { err = fmt.Errorf("unable to fetch authentication token: %w", err) log.Warnf("unable to establish entity stream between agents, will possibly retry: %s", err) diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go index f771ac594ae4f2..01d98cbba68085 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go @@ -28,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" "github.com/DataDog/datadog-agent/pkg/api/security" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -62,11 +63,11 @@ func (s *mockServer) StreamEntities(_ *pbgo.ProcessStreamEntitiesRequest, out pb func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath()); os.IsNotExist(err) { - security.CreateOrFetchToken() + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog)); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfig.Datadog) defer func() { // cleanup - os.Remove(security.GetAuthTokenFilepath()) + os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog)) }() } creationTime := time.Now().Unix() diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go index 33733662e2be5a..8f6068d2de2b01 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go @@ -17,18 +17,20 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/fx" + "google.golang.org/grpc" + 
"google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/workloadmeta" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" "github.com/DataDog/datadog-agent/pkg/api/security" + pkgconfig "github.com/DataDog/datadog-agent/pkg/config" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" ) type serverSecure struct { @@ -135,11 +137,11 @@ func TestHandleWorkloadmetaStreamResponse(t *testing.T) { func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath()); os.IsNotExist(err) { - security.CreateOrFetchToken() + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog)); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfig.Datadog) defer func() { // cleanup - os.Remove(security.GetAuthTokenFilepath()) + os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog)) }() } diff --git a/comp/forwarder/defaultforwarder/README.md b/comp/forwarder/defaultforwarder/README.md index 35233d50b1de0d..9975f8036b3aa2 100644 --- a/comp/forwarder/defaultforwarder/README.md +++ b/comp/forwarder/defaultforwarder/README.md @@ -96,7 +96,7 @@ When a transaction fails to be sent to a backend we blacklist that particular endpoints for some time to avoid flooding an unavailable endpoint (the transactions will be retried later). A blacklist is specific to one endpoint on one domain (ie: "http(s):///"). The blacklist time will grow, -up to a maximum, has more and more errors are encountered for that endpoint and +up to a maximum, as more and more errors are encountered for that endpoint and is gradually cleared when a transaction is successful. The blacklist is shared by all workers. 
diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod index be52b299e0f34d..86e27522671f5e 100644 --- a/comp/forwarder/defaultforwarder/go.mod +++ b/comp/forwarder/defaultforwarder/go.mod @@ -129,7 +129,7 @@ require ( github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.6.0 // indirect diff --git a/comp/forwarder/defaultforwarder/go.sum b/comp/forwarder/defaultforwarder/go.sum index 134710b3034966..6672a32f4f042b 100644 --- a/comp/forwarder/defaultforwarder/go.sum +++ b/comp/forwarder/defaultforwarder/go.sum @@ -345,8 +345,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -641,7 +641,6 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.mod b/comp/forwarder/orchestrator/orchestratorinterface/go.mod index a84ec901e5a4f5..912b7685cc2e35 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.mod +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.mod @@ -114,7 +114,7 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.6.0 // indirect diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.sum b/comp/forwarder/orchestrator/orchestratorinterface/go.sum index 9a55216bf1d602..6433544ec2fe04 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.sum +++ 
b/comp/forwarder/orchestrator/orchestratorinterface/go.sum @@ -332,8 +332,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -593,7 +593,6 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index 71690156a7295d..81f655c38cb532 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -404,6 +404,10 @@ apm_config: } func TestFetchSystemProbeAgent(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("system-probe does not support darwin") + } + defer func() { fetchSystemProbeConfig = configFetcher.SystemProbeConfig }() diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod index 0f2f673f5ee784..b6a0baae33f78a 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod @@ -12,7 +12,6 @@ replace ( github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../../../../forwarder/orchestrator/orchestratorinterface github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../../../../../../pkg/aggregator/ckey github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../../../pkg/collector/check/defaults - github.com/DataDog/datadog-agent/pkg/comp/core/secrets => ../../../../../../pkg/comp/core/secrets github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup @@ -165,7 +164,7 @@ require ( github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/statsd_exporter 
v0.22.7 // indirect github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.6.0 // indirect diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum index 38ff0fc90b92fe..aab162c36ce069 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum @@ -418,8 +418,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -732,7 +732,6 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/comp/otelcol/otlp/example/metric/go.mod b/comp/otelcol/otlp/example/metric/go.mod index acd12cc9cf84fd..5577cf181dccbe 100644 --- a/comp/otelcol/otlp/example/metric/go.mod +++ b/comp/otelcol/otlp/example/metric/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/otlp/example/metric -go 1.20 +go 1.21 require ( go.opentelemetry.io/otel v1.11.2 diff --git a/comp/remote-config/rcclient/component.go b/comp/remote-config/rcclient/component.go index 1afeeda03d9d7b..ad1111bd1e1192 100644 --- a/comp/remote-config/rcclient/component.go +++ b/comp/remote-config/rcclient/component.go @@ -19,7 +19,7 @@ type Component interface { // TODO: (components) Subscribe to AGENT_CONFIG configurations and start the remote config client // Once the remote config client is refactored and can push updates directly to the listeners, // we can remove this. 
- Start(clientName string) error + Start(agentName string) error // SubscribeAgentTask subscribe the remote-config client to AGENT_TASK SubscribeAgentTask() // Subscribe is the generic way to start listening to a specific product update diff --git a/comp/remote-config/rcclient/rcclient.go b/comp/remote-config/rcclient/rcclient.go index 596077ee955b91..f99e7cf0090890 100644 --- a/comp/remote-config/rcclient/rcclient.go +++ b/comp/remote-config/rcclient/rcclient.go @@ -78,7 +78,7 @@ func newRemoteConfigClient(deps dependencies) (provides, error) { c, err := client.NewUnverifiedGRPCClient( ipcAddress, config.GetIPCPort(), - security.FetchAuthToken, + func() (string, error) { return security.FetchAuthToken(config.Datadog) }, client.WithAgent("unknown", version.AgentVersion), client.WithPollInterval(5*time.Second), ) @@ -99,7 +99,7 @@ func newRemoteConfigClient(deps dependencies) (provides, error) { }, nil } -// Listen subscribes to AGENT_CONFIG configurations and start the remote config client +// Start subscribes to AGENT_CONFIG configurations and start the remote config client func (rc rcClient) Start(agentName string) error { rc.client.SetAgentName(agentName) diff --git a/comp/remote-config/rcclient/rcclient_test.go b/comp/remote-config/rcclient/rcclient_test.go index 7b29d00d8d6cfe..db49cd99da79c9 100644 --- a/comp/remote-config/rcclient/rcclient_test.go +++ b/comp/remote-config/rcclient/rcclient_test.go @@ -77,7 +77,7 @@ func TestAgentConfigCallback(t *testing.T) { assert.NoError(t, err) structRC.client, _ = client.NewUnverifiedGRPCClient( - ipcAddress, config.GetIPCPort(), security.FetchAuthToken, + ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog) }, client.WithAgent("test-agent", "9.99.9"), client.WithProducts([]data.Product{data.ProductAgentConfig}), client.WithPollInterval(time.Hour), diff --git a/comp/remote-config/rcservice/component.go b/comp/remote-config/rcservice/component.go new file mode 100644 index 00000000000000..a237f65dead0d4 --- /dev/null +++ b/comp/remote-config/rcservice/component.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package rcservice is a remote config service that can run within the agent to receive remote config updates from the DD backend. +package rcservice + +import ( + "context" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" +) + +// team: remote-config + +// Component is the component type. +type Component interface { + // ClientGetConfigs is the polling API called by tracers and agents to get the latest configurations + ClientGetConfigs(_ context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) + // ConfigGetState returns the state of the configuration and the director repos in the local store + ConfigGetState() (*pbgo.GetStateConfigResponse, error) +} diff --git a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go new file mode 100644 index 00000000000000..39e864a79e8853 --- /dev/null +++ b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go @@ -0,0 +1,103 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package rcserviceimpl is a remote config service that can run within the agent to receive remote config updates from the DD backend. +package rcserviceimpl + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/comp/core/log" + "github.com/DataDog/datadog-agent/pkg/util/optional" + + cfgcomp "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/comp/core/hostname" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" + "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" + "github.com/DataDog/datadog-agent/pkg/config" + remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" + configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/version" + + "go.uber.org/fx" +) + +// Module conditionally provides the remote config service. +func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newRemoteConfigServiceOptional), + ) +} + +type dependencies struct { + fx.In + + Lc fx.Lifecycle + + DdRcTelemetryReporter rctelemetryreporter.Component + Hostname hostname.Component + Cfg cfgcomp.Component + Logger log.Component +} + +// newRemoteConfigServiceOptional conditionally creates and configures a new remote config service, based on whether RC is enabled. +func newRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservice.Component] { + none := optional.NewNoneOption[rcservice.Component]() + if !config.IsRemoteConfigEnabled(deps.Cfg) { + return none + } + + configService, err := newRemoteConfigService(deps) + if err != nil { + deps.Logger.Errorf("remote config service not initialized or started: %s", err) + return none + } + + return optional.NewOption[rcservice.Component](configService) +} + +// newRemoteConfigServiceOptional creates and configures a new remote config service +func newRemoteConfigService(deps dependencies) (rcservice.Component, error) { + apiKey := config.Datadog.GetString("api_key") + if config.Datadog.IsSet("remote_configuration.api_key") { + apiKey = config.Datadog.GetString("remote_configuration.api_key") + } + apiKey = configUtils.SanitizeAPIKey(apiKey) + baseRawURL := configUtils.GetMainEndpoint(config.Datadog, "https://config.", "remote_configuration.rc_dd_url") + traceAgentEnv := configUtils.GetTraceAgentDefaultEnv(config.Datadog) + configuredTags := configUtils.GetConfiguredTags(config.Datadog, false) + + configService, err := remoteconfig.NewService( + config.Datadog, + apiKey, + baseRawURL, + deps.Hostname.GetSafe(context.Background()), + configuredTags, + deps.DdRcTelemetryReporter, + version.AgentVersion, + remoteconfig.WithTraceAgentEnv(traceAgentEnv), + ) + if err != nil { + return nil, fmt.Errorf("unable to create remote config service: %w", err) + } + + deps.Lc.Append(fx.Hook{OnStart: func(_ context.Context) error { + configService.Start() + deps.Logger.Info("remote config service started") + return nil + }}) + deps.Lc.Append(fx.Hook{OnStop: func(_ context.Context) error { + err = configService.Stop() + if err != nil { + deps.Logger.Errorf("unable to stop remote config service: %s", err) + return err + } + deps.Logger.Info("remote config service stopped") + return nil + }}) + + return configService, nil +} diff --git a/comp/remote-config/rctelemetryreporter/component.go 
b/comp/remote-config/rctelemetryreporter/component.go new file mode 100644 index 00000000000000..64d3613dcacc8a --- /dev/null +++ b/comp/remote-config/rctelemetryreporter/component.go @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package rctelemetryreporter provides a component that sends RC-specific metrics to the DD backend. +package rctelemetryreporter + +// team: remote-config + +// Component is the component type. +type Component interface { + // IncTimeout increments the DdRcTelemetryReporter BypassTimeoutCounter counter. + IncTimeout() + // IncRateLimit increments the DdRcTelemetryReporter BypassRateLimitCounter counter. + IncRateLimit() +} diff --git a/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl/rctelemetryreporter.go b/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl/rctelemetryreporter.go new file mode 100644 index 00000000000000..080c2ee27e9ca0 --- /dev/null +++ b/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl/rctelemetryreporter.go @@ -0,0 +1,59 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package rctelemetryreporterimpl provides a DdRcTelemetryReporter that sends RC-specific metrics to the DD backend. +package rctelemetryreporterimpl + +import ( + "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" + "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + + "go.uber.org/fx" +) + +// Module defines the fx options for this component. +func Module() fxutil.Module { + return fxutil.Component( + fx.Provide(newDdRcTelemetryReporter), + ) +} + +// DdRcTelemetryReporter is a datadog-agent telemetry counter for RC cache bypass metrics. It implements the RcTelemetryReporter interface. +type DdRcTelemetryReporter struct { + BypassRateLimitCounter telemetry.Counter + BypassTimeoutCounter telemetry.Counter +} + +// IncRateLimit increments the DdRcTelemetryReporter BypassRateLimitCounter counter. +func (r *DdRcTelemetryReporter) IncRateLimit() { + r.BypassRateLimitCounter.Inc() +} + +// IncTimeout increments the DdRcTelemetryReporter BypassTimeoutCounter counter. 
+func (r *DdRcTelemetryReporter) IncTimeout() { + r.BypassTimeoutCounter.Inc() +} + +// newDdRcTelemetryReporter creates a new Remote Config telemetry reporter for sending RC metrics to Datadog +func newDdRcTelemetryReporter() rctelemetryreporter.Component { + commonOpts := telemetry.Options{NoDoubleUnderscoreSep: true} + return &DdRcTelemetryReporter{ + BypassRateLimitCounter: telemetry.NewCounterWithOpts( + "remoteconfig", + "cache_bypass_ratelimiter_skip", + []string{}, + "Number of Remote Configuration cache bypass requests skipped by rate limiting.", + commonOpts, + ), + BypassTimeoutCounter: telemetry.NewCounterWithOpts( + "remoteconfig", + "cache_bypass_timeout", + []string{}, + "Number of Remote Configuration cache bypass requests that timeout.", + commonOpts, + ), + } +} diff --git a/comp/systray/systray/systrayimpl/doconfigure.go b/comp/systray/systray/systrayimpl/doconfigure.go index 844872fd95de70..dac4988a6f9e92 100644 --- a/comp/systray/systray/systrayimpl/doconfigure.go +++ b/comp/systray/systray/systrayimpl/doconfigure.go @@ -32,7 +32,7 @@ func doConfigure(s *systrayImpl) error { } // Read the authentication token: can only be done if user can read from datadog.yaml - authToken, err := security.FetchAuthToken() + authToken, err := security.FetchAuthToken(pkgconfig.Datadog) if err != nil { return err } @@ -44,7 +44,7 @@ func doConfigure(s *systrayImpl) error { return err } urlstr := fmt.Sprintf("https://%v:%v/agent/gui/csrf-token", ipcAddress, s.config.GetInt("cmd_port")) - err = util.SetAuthToken() + err = util.SetAuthToken(pkgconfig.Datadog) if err != nil { return err } diff --git a/comp/systray/systray/systrayimpl/doflare.go b/comp/systray/systray/systrayimpl/doflare.go index 9b36fedfb349a5..f4cdb0c955dcd7 100644 --- a/comp/systray/systray/systrayimpl/doflare.go +++ b/comp/systray/systray/systrayimpl/doflare.go @@ -178,7 +178,7 @@ func requestFlare(s *systrayImpl, caseID, customerEmail string) (response string urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, config.Datadog.GetInt("cmd_port")) // Set session token - e = util.SetAuthToken() + e = util.SetAuthToken(config.Datadog) if e != nil { return } diff --git a/comp/trace/agent/run.go b/comp/trace/agent/run.go index 3e36e9ed4785c9..5d303795f92146 100644 --- a/comp/trace/agent/run.go +++ b/comp/trace/agent/run.go @@ -76,7 +76,7 @@ func runAgentSidekicks(ag *agent) error { // the trace agent. // pkg/config is not a go-module yet and pulls a large chunk of Agent code base with it. Using it within the // trace-agent would largely increase the number of module pulled by OTEL when using the pkg/trace go-module. 
- if err := apiutil.CreateAndSetAuthToken(); err != nil { + if err := apiutil.CreateAndSetAuthToken(coreconfig.Datadog); err != nil { log.Errorf("could not set auth token: %s", err) } else { ag.Agent.DebugServer.AddRoute("/config", ag.config.GetConfigHandler()) @@ -188,5 +188,5 @@ func newConfigFetcher() (rc.ConfigUpdater, error) { } // Auth tokens are handled by the rcClient - return rc.NewAgentGRPCConfigFetcher(ipcAddress, coreconfig.GetIPCPort(), security.FetchAuthToken) + return rc.NewAgentGRPCConfigFetcher(ipcAddress, coreconfig.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(coreconfig.Datadog) }) } diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index 6fe994f215db5e..483356fce1e5b2 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -20,6 +20,9 @@ import ( "strings" "time" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" + "go.opentelemetry.io/collector/component/componenttest" + corecompcfg "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/collectors" @@ -30,8 +33,6 @@ import ( rc "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/config/utils" - "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" - "go.opentelemetry.io/collector/component/componenttest" //nolint:revive // TODO(APM) Fix revive linter configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" @@ -121,7 +122,7 @@ func prepareConfig(c corecompcfg.Component) (*config.AgentConfig, error) { client, err := rc.NewGRPCClient( ipcAddress, coreconfig.GetIPCPort(), - security.FetchAuthToken, + func() (string, error) { return security.FetchAuthToken(c) }, rc.WithAgent(rcClientName, version.AgentVersion), rc.WithProducts([]data.Product{data.ProductAPMSampling, data.ProductAgentConfig}), rc.WithPollInterval(rcClientPollInterval), diff --git a/docs/cloud-workload-security/backend.md b/docs/cloud-workload-security/backend.md index 94c3014b46fd8f..28f9983724698f 100644 --- a/docs/cloud-workload-security/backend.md +++ b/docs/cloud-workload-security/backend.md @@ -123,6 +123,10 @@ CSM Threats logs have the following JSON schema: "type": "string", "format": "date-time", "description": "Creation time of the container" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -225,6 +229,10 @@ CSM Threats logs have the following JSON schema: "origin": { "type": "string", "description": "Origin of the event" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -900,11 +908,15 @@ CSM Threats logs have the following JSON schema: }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, "source": { "type": "string", "description": "Process source" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -1028,12 +1040,16 @@ CSM Threats logs have the following JSON schema: }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, 
"source": { "type": "string", "description": "Process source" }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" + }, "parent": { "$ref": "#/$defs/Process", "description": "Parent process" @@ -1320,6 +1336,10 @@ CSM Threats logs have the following JSON schema: "additionalProperties": false, "type": "object", "description": "UserSessionContextSerializer serializes the user session context to JSON" + }, + "Variables": { + "type": "object", + "description": "Variables serializes the variable values" } }, "properties": { @@ -1606,6 +1626,10 @@ CSM Threats logs have the following JSON schema: "type": "string", "format": "date-time", "description": "Creation time of the container" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -1619,7 +1643,11 @@ CSM Threats logs have the following JSON schema: | ----- | ----------- | | `id` | Container ID | | `created_at` | Creation time of the container | +| `variables` | Variables values | +| References | +| ---------- | +| [Variables](#variables) | ## `DDContext` @@ -1766,6 +1794,10 @@ CSM Threats logs have the following JSON schema: "origin": { "type": "string", "description": "Origin of the event" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -1783,7 +1815,11 @@ CSM Threats logs have the following JSON schema: | `async` | True if the event was asynchronous | | `matched_rules` | The list of rules that the event matched (only valid in the context of an anomaly) | | `origin` | Origin of the event | +| `variables` | Variables values | +| References | +| ---------- | +| [Variables](#variables) | ## `ExitEvent` @@ -2710,11 +2746,15 @@ CSM Threats logs have the following JSON schema: }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, "source": { "type": "string", "description": "Process source" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -2755,8 +2795,9 @@ CSM Threats logs have the following JSON schema: | `envs_truncated` | Indicator of environments variable truncation | | `is_thread` | Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program) | | `is_kworker` | Indicates whether the process is a kworker | -| `is_exec_child` | Indicates wether the process is an exec child of its parent | +| `is_exec_child` | Indicates whether the process is an exec following another exec | | `source` | Process source | +| `variables` | Variables values | | References | | ---------- | @@ -2765,6 +2806,7 @@ CSM Threats logs have the following JSON schema: | [File](#file) | | [File](#file) | | [ContainerContext](#containercontext) | +| [Variables](#variables) | ## `ProcessContext` @@ -2883,12 +2925,16 @@ CSM Threats logs have the following JSON schema: }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, "source": { "type": "string", "description": "Process source" }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" + }, "parent": { "$ref": "#/$defs/Process", "description": "Parent process" @@ -2939,8 +2985,9 
@@ CSM Threats logs have the following JSON schema: | `envs_truncated` | Indicator of environments variable truncation | | `is_thread` | Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program) | | `is_kworker` | Indicates whether the process is a kworker | -| `is_exec_child` | Indicates wether the process is an exec child of its parent | +| `is_exec_child` | Indicates whether the process is an exec following another exec | | `source` | Process source | +| `variables` | Variables values | | `parent` | Parent process | | `ancestors` | Ancestor processes | @@ -2951,6 +2998,7 @@ CSM Threats logs have the following JSON schema: | [File](#file) | | [File](#file) | | [ContainerContext](#containercontext) | +| [Variables](#variables) | | [Process](#process) | ## `ProcessCredentials` @@ -3377,6 +3425,19 @@ CSM Threats logs have the following JSON schema: | `k8s_extra` | Extra of the Kubernetes "kubectl exec" session | +## `Variables` + + +{{< code-block lang="json" collapsible="true" >}} +{ + "type": "object", + "description": "Variables serializes the variable values" +} + +{{< /code-block >}} + + + [1]: /security/threats/ [2]: /security/threats/agent_expressions diff --git a/docs/cloud-workload-security/backend.schema.json b/docs/cloud-workload-security/backend.schema.json index 4eddbf730d38a9..1db0c944c75fb1 100644 --- a/docs/cloud-workload-security/backend.schema.json +++ b/docs/cloud-workload-security/backend.schema.json @@ -107,6 +107,10 @@ "type": "string", "format": "date-time", "description": "Creation time of the container" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -209,6 +213,10 @@ "origin": { "type": "string", "description": "Origin of the event" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -884,11 +892,15 @@ }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, "source": { "type": "string", "description": "Process source" + }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" } }, "additionalProperties": false, @@ -1012,12 +1024,16 @@ }, "is_exec_child": { "type": "boolean", - "description": "Indicates wether the process is an exec child of its parent" + "description": "Indicates whether the process is an exec following another exec" }, "source": { "type": "string", "description": "Process source" }, + "variables": { + "$ref": "#/$defs/Variables", + "description": "Variables values" + }, "parent": { "$ref": "#/$defs/Process", "description": "Parent process" @@ -1304,6 +1320,10 @@ "additionalProperties": false, "type": "object", "description": "UserSessionContextSerializer serializes the user session context to JSON" + }, + "Variables": { + "type": "object", + "description": "Variables serializes the variable values" } }, "properties": { diff --git a/docs/cloud-workload-security/scripts/backend-doc-gen.py b/docs/cloud-workload-security/scripts/backend-doc-gen.py index a349d60d79de1f..705c241bfab09e 100644 --- a/docs/cloud-workload-security/scripts/backend-doc-gen.py +++ b/docs/cloud-workload-security/scripts/backend-doc-gen.py @@ -78,7 +78,7 @@ def extract_ref_name_and_anchor(ref): for name, definition in json_top_node["$defs"].items(): references = [] descriptions = [] - for 
prop_name, prop in definition["properties"].items(): + for prop_name, prop in definition.get("properties", {}).items(): if "$ref" in prop: ref_name, ref_anchor = extract_ref_name_and_anchor(prop["$ref"]) references.append(DefinitionReference(ref_name, ref_anchor)) diff --git a/docs/dev/contributing.md b/docs/dev/contributing.md index 39fcdfb4fc60c6..dc433063d94257 100644 --- a/docs/dev/contributing.md +++ b/docs/dev/contributing.md @@ -37,16 +37,40 @@ Have you fixed a bug or written a new check and want to share it? Many thanks! In order to ease/speed up our review, here are some items you can check/improve when submitting your PR: - * have a proper commit history (we advise you to rebase if needed). - * write tests for the code you wrote. - * preferably make sure that all tests pass locally. - * summarize your PR with an explanatory title and a message describing your - changes, cross-referencing any related bugs/PRs. - * use [Reno](#reno) to create a releasenote. - * open your PR against the `main` branch. - * for PRs from contributors with write access to the repository (for community PRs, will be done by Datadog employees): - + set the relevant `team/` label - + add a milestone to your PR (by default, use the highest milestone version available, ex: `7.49.0`) +
+Contributor Checklist + +- [ ] Have a proper commit history (we advise you to rebase if needed) with clear commit messages. + +- [ ] Write tests for the code you wrote. + +- [ ] Preferably make sure that all tests pass locally. + +- [ ] Summarize your PR with an explanatory title and a message describing your changes, cross-referencing any related bugs/PRs. + +- [ ] Use [Reno](#reno) to create a release note. + +- [ ] Open your PR against the `main` branch. + +- [ ] Provide adequate QA/testing plan information. +
+
+ +
+Reviewer Checklist + +- [ ] The added code comes with tests. + +- [ ] The CI is green, all tests are passing (required or not). + +- [ ] All applicable labels are set on the PR (see [PR labels list](#pr-labels)). + +- [ ] If applicable, the [config template](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml) has been updated. +
+
+ +> [!NOTE] +> Adding GitHub labels is only possible for contributors with write access. Your pull request must pass all CI tests before we will merge it. If you're seeing an error and don't think it's your fault, it may not be! [Join us on Slack][slack] @@ -264,6 +288,8 @@ labels that can be use: - `major_change`: to flag the PR as a major change impacting many/all teams working on the agent and will require deeper QA (example: when we change the Python version shipped in the agent). +- `need-change/operator`, `need-change/helm`: indicate that the configuration needs to be modified in the operator / helm chart as well. +- `k8s/`: indicate the lowest Kubernetes version compatible with the PR's feature. ## Integrations diff --git a/go.mod b/go.mod index 7dfb6bfc2930b2..47e13e5bcb297d 100644 --- a/go.mod +++ b/go.mod @@ -38,6 +38,7 @@ replace ( github.com/DataDog/datadog-agent/comp/logs/agent/config => ./comp/logs/agent/config github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ./comp/otelcol/otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ./pkg/aggregator/ckey/ + github.com/DataDog/datadog-agent/pkg/api => ./pkg/api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ./pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ./pkg/config/env github.com/DataDog/datadog-agent/pkg/config/logs => ./pkg/config/logs @@ -166,7 +167,7 @@ require ( github.com/google/gofuzz v1.2.0 github.com/google/gopacket v1.1.19 github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b - github.com/gorilla/mux v1.8.0 + github.com/gorilla/mux v1.8.1 github.com/gosnmp/gosnmp v1.37.1-0.20240115134726-db0c09337869 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -326,7 +327,7 @@ require ( github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7 // indirect github.com/GoogleCloudPlatform/docker-credential-gcr v2.0.5+incompatible // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/semver v1.5.0 github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 @@ -613,6 +614,7 @@ require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.51.0-rc.2 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.51.0-rc.2 + github.com/DataDog/datadog-agent/pkg/api v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.51.0-rc.2 github.com/DataDog/datadog-agent/pkg/config/env v0.51.0-rc.2 github.com/DataDog/datadog-agent/pkg/config/logs v0.51.0-rc.2 diff --git a/go.sum b/go.sum index b6e73009113642..27a8efea4a5a55 100644 --- a/go.sum +++ b/go.sum @@ -932,8 +932,8 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0 
h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= diff --git a/internal/tools/independent-lint/go.mod b/internal/tools/independent-lint/go.mod index 8b9f1394e8b581..6aced20e33891c 100644 --- a/internal/tools/independent-lint/go.mod +++ b/internal/tools/independent-lint/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/cmd/independent-lint -go 1.20 +go 1.21 require golang.org/x/mod v0.5.1 diff --git a/omnibus/resources/agent/msi/source.wxs.erb b/omnibus/resources/agent/msi/source.wxs.erb index 94e5b0770716f0..6052c001d7639d 100644 --- a/omnibus/resources/agent/msi/source.wxs.erb +++ b/omnibus/resources/agent/msi/source.wxs.erb @@ -693,7 +693,6 @@ Value="!(loc.LaunchAgentManager)" /> - diff --git a/pkg/api/docs.go b/pkg/api/docs.go new file mode 100644 index 00000000000000..b20b14758820f3 --- /dev/null +++ b/pkg/api/docs.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package api contains logic related to the agent API servers. +package api diff --git a/pkg/api/go.mod b/pkg/api/go.mod new file mode 100644 index 00000000000000..8df6246805c26c --- /dev/null +++ b/pkg/api/go.mod @@ -0,0 +1,83 @@ +module github.com/DataDog/datadog-agent/pkg/api + +go 1.21 + +replace ( + github.com/DataDog/datadog-agent/comp/core/secrets => ../../comp/core/secrets + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config => ../config + github.com/DataDog/datadog-agent/pkg/config/env => ../config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../config/model + github.com/DataDog/datadog-agent/pkg/config/setup => ../config/setup + github.com/DataDog/datadog-agent/pkg/config/utils => ../config/utils + github.com/DataDog/datadog-agent/pkg/status/health => ../status/health + github.com/DataDog/datadog-agent/pkg/util/executable => ../util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../util/filesystem + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../util/system/socket + github.com/DataDog/datadog-agent/pkg/util/winutil => ../util/winutil + github.com/DataDog/datadog-agent/pkg/version => ../version +) + +require ( + github.com/DataDog/datadog-agent/pkg/config/model v0.51.0-rc.2 + github.com/DataDog/datadog-agent/pkg/config/utils v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/status/health 
v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.51.0-rc.2 + github.com/DataDog/datadog-agent/pkg/util/log v0.51.0-rc.2 + github.com/gorilla/mux v1.8.1 + github.com/stretchr/testify v1.8.4 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.50.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.51.0-rc.2 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.51.0-rc.2 // indirect + github.com/DataDog/viper v1.12.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.16.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/api/go.sum b/pkg/api/go.sum new file mode 100644 index 00000000000000..7bcfd3df03468c --- /dev/null +++ b/pkg/api/go.sum @@ -0,0 +1,363 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-agent/comp/core/flare/types v0.51.0-rc.2 h1:ZkaW23KeBQ0FxG8FXztOCYeJL1P47URfrWZkaFRjZ0Q= +github.com/DataDog/datadog-agent/comp/core/flare/types 
v0.51.0-rc.2/go.mod h1:h0h6cm8eqb1JnuqHMCdTir+c4IUlOUnOy6xrmvhQXOY= +github.com/DataDog/datadog-agent/comp/core/telemetry v0.51.0-rc.2 h1:r0oFEwVFnr7G+1jIu5jBXVqMTPL/zH2dm2J0Gy/cOiA= +github.com/DataDog/datadog-agent/comp/core/telemetry v0.51.0-rc.2/go.mod h1:bXUOZOuw5eYyTwPgPodrGu2h4ziKMOHutfUaTwPhhX0= +github.com/DataDog/datadog-agent/pkg/telemetry v0.51.0-rc.2 h1:OCnwpBOwS/BRYV0gXoDDeA926DeiSoTFyA2K8uvHU80= +github.com/DataDog/datadog-agent/pkg/telemetry v0.51.0-rc.2/go.mod h1:YJucwMRrRdWbI1hUOyNeXS2uLT9HshHhdLD8f1qKagY= +github.com/DataDog/datadog-agent/pkg/util/fxutil v0.51.0-rc.2 h1:aeYwvAFgVyAJnuSfdAiOuEQCcs5JN1Ah0pjpcq5tlaI= +github.com/DataDog/datadog-agent/pkg/util/fxutil v0.51.0-rc.2/go.mod h1:+ViwDZ54Ox34yYqgYozTFx7xjXG/+IhhudfE1Nx/9+A= +github.com/DataDog/datadog-agent/pkg/util/testutil v0.50.2 h1:o7Ys6VLP6wVQULSOlTlv/N9X9l4toLUBJSE+iOdA2vo= +github.com/DataDog/datadog-agent/pkg/util/testutil v0.50.2/go.mod h1:X7ODRWbUGbvM07oZPDbrYzKqt8205dXjD35M9yWrMm8= +github.com/DataDog/viper v1.12.0 h1:FufyZpZPxyszafSV5B8Q8it75IhhuJwH0T7QpT6HnD0= +github.com/DataDog/viper v1.12.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext 
v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat 
v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= 
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= +go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= +go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= +go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk= +go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4= +golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/api/healthprobe/healthprobe.go b/pkg/api/healthprobe/healthprobe.go index 9e324b9fa106ae..6e075817d5dbca 100644 --- a/pkg/api/healthprobe/healthprobe.go +++ b/pkg/api/healthprobe/healthprobe.go @@ -17,7 +17,7 @@ import ( "github.com/gorilla/mux" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -27,7 +27,7 @@ const defaultTimeout = time.Second // Serve configures and starts the http server for the health check. // It returns an error if the setup failed, or runs the server in a goroutine. 
// Stop the server by cancelling the passed context. -func Serve(ctx context.Context, port int) error { +func Serve(ctx context.Context, config model.Reader, port int) error { if port == 0 { return errors.New("port should be non-zero") } @@ -37,10 +37,10 @@ func Serve(ctx context.Context, port int) error { } r := mux.NewRouter() - r.HandleFunc("/live", liveHandler) - r.HandleFunc("/ready", readyHandler) + r.HandleFunc("/live", liveHandler(config)) + r.HandleFunc("/ready", readyHandler(config)) // Default route for backward compatibility - r.NewRoute().HandlerFunc(liveHandler) + r.NewRoute().HandlerFunc(liveHandler(config)) srv := &http.Server{ Handler: r, @@ -64,7 +64,7 @@ func closeOnContext(ctx context.Context, srv *http.Server) { srv.Shutdown(timeout) //nolint:errcheck } -func healthHandler(getStatusNonBlocking func() (health.Status, error), w http.ResponseWriter, _ *http.Request) { +func healthHandler(config model.Reader, getStatusNonBlocking func() (health.Status, error), w http.ResponseWriter, _ *http.Request) { health, err := getStatusNonBlocking() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -73,7 +73,7 @@ func healthHandler(getStatusNonBlocking func() (health.Status, error), w http.Re if len(health.Unhealthy) > 0 { w.WriteHeader(http.StatusInternalServerError) log.Infof("Healthcheck failed on: %v", health.Unhealthy) - if config.Datadog.GetBool("log_all_goroutines_when_unhealthy") { + if config.GetBool("log_all_goroutines_when_unhealthy") { log.Infof("Goroutines stack: \n%s\n", allStack()) } } @@ -89,10 +89,14 @@ func healthHandler(getStatusNonBlocking func() (health.Status, error), w http.Re w.Write(jsonHealth) } -func liveHandler(w http.ResponseWriter, r *http.Request) { - healthHandler(health.GetLiveNonBlocking, w, r) +func liveHandler(config model.Reader) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + healthHandler(config, health.GetLiveNonBlocking, w, r) + } } -func readyHandler(w http.ResponseWriter, r *http.Request) { - healthHandler(health.GetReadyNonBlocking, w, r) +func readyHandler(config model.Reader) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + healthHandler(config, health.GetReadyNonBlocking, w, r) + } } diff --git a/pkg/api/security/security.go b/pkg/api/security/security.go index 39d01e9caa16ce..d4aefbc38a81c0 100644 --- a/pkg/api/security/security.go +++ b/pkg/api/security/security.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + configModel "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -113,27 +113,27 @@ func GenerateRootCert(hosts []string, bits int) (cert *x509.Certificate, certPEM } // GetAuthTokenFilepath returns the path to the auth_token file. 
-func GetAuthTokenFilepath() string { - if config.Datadog.GetString("auth_token_file_path") != "" { - return config.Datadog.GetString("auth_token_file_path") +func GetAuthTokenFilepath(config configModel.Reader) string { + if config.GetString("auth_token_file_path") != "" { + return config.GetString("auth_token_file_path") } - return filepath.Join(filepath.Dir(config.Datadog.ConfigFileUsed()), authTokenName) + return filepath.Join(filepath.Dir(config.ConfigFileUsed()), authTokenName) } // FetchAuthToken gets the authentication token from the auth token file & creates one if it doesn't exist // Requires that the config has been set up before calling -func FetchAuthToken() (string, error) { - return fetchAuthToken(false) +func FetchAuthToken(config configModel.Reader) (string, error) { + return fetchAuthToken(config, false) } // CreateOrFetchToken gets the authentication token from the auth token file & creates one if it doesn't exist // Requires that the config has been set up before calling -func CreateOrFetchToken() (string, error) { - return fetchAuthToken(true) +func CreateOrFetchToken(config configModel.Reader) (string, error) { + return fetchAuthToken(config, true) } -func fetchAuthToken(tokenCreationAllowed bool) (string, error) { - authTokenFile := GetAuthTokenFilepath() +func fetchAuthToken(config configModel.Reader, tokenCreationAllowed bool) (string, error) { + authTokenFile := GetAuthTokenFilepath(config) // Create a new token if it doesn't exist and if permitted by calling func if _, e := os.Stat(authTokenFile); os.IsNotExist(e) && tokenCreationAllowed { @@ -174,8 +174,8 @@ func fetchAuthToken(tokenCreationAllowed bool) (string, error) { // 2nd. from the filesystem // If using the token from the filesystem, the token file must be next to the datadog.yaml // with the filename: cluster_agent.auth_token, it will fail if the file does not exist -func GetClusterAgentAuthToken() (string, error) { - return getClusterAgentAuthToken(false) +func GetClusterAgentAuthToken(config configModel.Reader) (string, error) { + return getClusterAgentAuthToken(config, false) } // CreateOrGetClusterAgentAuthToken load the authentication token from: @@ -184,19 +184,19 @@ func GetClusterAgentAuthToken() (string, error) { // If using the token from the filesystem, the token file must be next to the datadog.yaml // with the filename: cluster_agent.auth_token, if such file does not exist it will be // created and populated with a newly generated token. 
-func CreateOrGetClusterAgentAuthToken() (string, error) { - return getClusterAgentAuthToken(true) +func CreateOrGetClusterAgentAuthToken(config configModel.Reader) (string, error) { + return getClusterAgentAuthToken(config, true) } -func getClusterAgentAuthToken(tokenCreationAllowed bool) (string, error) { - authToken := config.Datadog.GetString("cluster_agent.auth_token") +func getClusterAgentAuthToken(config configModel.Reader, tokenCreationAllowed bool) (string, error) { + authToken := config.GetString("cluster_agent.auth_token") if authToken != "" { log.Infof("Using configured cluster_agent.auth_token") return authToken, validateAuthToken(authToken) } // load the cluster agent auth token from filesystem - tokenAbsPath := filepath.Join(configUtils.ConfFileDirectory(config.Datadog), clusterAgentAuthTokenFilename) + tokenAbsPath := filepath.Join(configUtils.ConfFileDirectory(config), clusterAgentAuthTokenFilename) log.Debugf("Empty cluster_agent.auth_token, loading from %s", tokenAbsPath) // Create a new token if it doesn't exist diff --git a/pkg/api/security/security_test.go b/pkg/api/security/security_test.go index d1d51aafd9e314..d1b94250269da7 100644 --- a/pkg/api/security/security_test.go +++ b/pkg/api/security/security_test.go @@ -11,15 +11,16 @@ import ( "fmt" "os" "path/filepath" + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) -func initMockConf(t *testing.T) string { +func initMockConf(t *testing.T) (model.Config, string) { testDir := t.TempDir() f, err := os.CreateTemp(testDir, "fake-datadog-yaml-") @@ -28,16 +29,16 @@ func initMockConf(t *testing.T) string { f.Close() }) - mockConfig := config.Mock(t) + mockConfig := model.NewConfig("datadog", "fake-datadog-yaml", strings.NewReplacer(".", "_")) mockConfig.SetConfigFile(f.Name()) mockConfig.SetWithoutSource("auth_token", "") - return filepath.Join(testDir, "auth_token") + return mockConfig, filepath.Join(testDir, "auth_token") } func TestCreateOrFetchAuthTokenValidGen(t *testing.T) { - expectTokenPath := initMockConf(t) - token, err := CreateOrFetchToken() + config, expectTokenPath := initMockConf(t) + token, err := CreateOrFetchToken(config) require.Nil(t, err, fmt.Sprintf("%v", err)) assert.True(t, len(token) > authTokenMinimalLen, fmt.Sprintf("%d", len(token))) _, err = os.Stat(expectTokenPath) @@ -45,21 +46,21 @@ func TestCreateOrFetchAuthTokenValidGen(t *testing.T) { } func TestFetchAuthToken(t *testing.T) { - expectTokenPath := initMockConf(t) + config, expectTokenPath := initMockConf(t) - token, err := FetchAuthToken() + token, err := FetchAuthToken(config) require.NotNil(t, err) require.Equal(t, "", token) _, err = os.Stat(expectTokenPath) require.True(t, os.IsNotExist(err)) - newToken, err := CreateOrFetchToken() + newToken, err := CreateOrFetchToken(config) require.Nil(t, err, fmt.Sprintf("%v", err)) require.True(t, len(newToken) > authTokenMinimalLen, fmt.Sprintf("%d", len(newToken))) _, err = os.Stat(expectTokenPath) require.Nil(t, err) - token, err = FetchAuthToken() + token, err = FetchAuthToken(config) require.Nil(t, err, fmt.Sprintf("%v", err)) require.Equal(t, newToken, token) } diff --git a/pkg/api/util/util.go b/pkg/api/util/util.go index 0a4befebbc8cd6..cff42139a45c01 100644 --- a/pkg/api/util/util.go +++ b/pkg/api/util/util.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/api/security" + 
"github.com/DataDog/datadog-agent/pkg/config/model" ) var ( @@ -22,7 +23,7 @@ var ( // SetAuthToken sets the session token // Requires that the config has been set up before calling -func SetAuthToken() error { +func SetAuthToken(config model.Reader) error { // Noop if token is already set if token != "" { return nil @@ -30,13 +31,13 @@ func SetAuthToken() error { // token is only set once, no need to mutex protect var err error - token, err = security.FetchAuthToken() + token, err = security.FetchAuthToken(config) return err } // CreateAndSetAuthToken creates and sets the authorization token // Requires that the config has been set up before calling -func CreateAndSetAuthToken() error { +func CreateAndSetAuthToken(config model.Reader) error { // Noop if token is already set if token != "" { return nil @@ -44,7 +45,7 @@ func CreateAndSetAuthToken() error { // token is only set once, no need to mutex protect var err error - token, err = security.CreateOrFetchToken() + token, err = security.CreateOrFetchToken(config) return err } @@ -55,7 +56,7 @@ func GetAuthToken() string { // InitDCAAuthToken initialize the session token for the Cluster Agent based on config options // Requires that the config has been set up before calling -func InitDCAAuthToken() error { +func InitDCAAuthToken(config model.Reader) error { // Noop if dcaToken is already set if dcaToken != "" { return nil @@ -63,7 +64,7 @@ func InitDCAAuthToken() error { // dcaToken is only set once, no need to mutex protect var err error - dcaToken, err = security.CreateOrGetClusterAgentAuthToken() + dcaToken, err = security.CreateOrGetClusterAgentAuthToken(config) return err } diff --git a/pkg/cli/standalone/jmx.go b/pkg/cli/standalone/jmx.go index ee6dd5ef1db08a..3e80263652f4ec 100644 --- a/pkg/cli/standalone/jmx.go +++ b/pkg/cli/standalone/jmx.go @@ -59,7 +59,7 @@ func ExecJmxListWithRateMetricsJSON(selectedChecks []string, logLevel string, co // The common utils, including AutoConfig, must have already been initialized. 
func execJmxCommand(command string, selectedChecks []string, reporter jmxfetch.JMXReporter, output func(...interface{}), logLevel string, configs []integration.Config, wmeta workloadmeta.Component, taggerComp tagger.Component, senderManager sender.DiagnoseSenderManager, agentAPI internalAPI.Component) error { // start the cmd HTTP server - if err := agentAPI.StartServer(nil, wmeta, taggerComp, optional.NewNoneOption[logsAgent.Component](), senderManager); err != nil { + if err := agentAPI.StartServer(wmeta, taggerComp, optional.NewNoneOption[logsAgent.Component](), senderManager); err != nil { return fmt.Errorf("Error while starting api server, exiting: %v", err) } diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index d7f71d41b8dde7..aa1e144f0a42a8 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -12,6 +12,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "os" "path/filepath" "runtime" @@ -206,6 +207,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() inventoryagent.Component { return nil }), fx.Provide(func() inventoryhost.Component { return nil }), fx.Provide(func() packagesigning.Component { return nil }), + fx.Provide(func() optional.Option[rcservice.Component] { return optional.NewNoneOption[rcservice.Component]() }), ) }, } diff --git a/pkg/cli/subcommands/clusterchecks/command.go b/pkg/cli/subcommands/clusterchecks/command.go index 896efd2c55c136..14d46dc2c79433 100644 --- a/pkg/cli/subcommands/clusterchecks/command.go +++ b/pkg/cli/subcommands/clusterchecks/command.go @@ -126,14 +126,14 @@ func run(log log.Component, config config.Component, cliParams *cliParams) error return flare.GetEndpointsChecks(color.Output, cliParams.checkName) } -func rebalance(_ log.Component, _ config.Component, cliParams *cliParams) error { +func rebalance(_ log.Component, config config.Component, cliParams *cliParams) error { fmt.Println("Requesting a cluster check rebalance...") c := util.GetClient(false) // FIX: get certificates right then make this true urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/rebalance", pkgconfig.Datadog.GetInt("cluster_agent.cmd_port")) // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } @@ -178,7 +178,7 @@ func rebalance(_ log.Component, _ config.Component, cliParams *cliParams) error return nil } -func isolate(_ log.Component, _ config.Component, cliParams *cliParams) error { +func isolate(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true if cliParams.checkID == "" { return fmt.Errorf("checkID must be specified") @@ -186,7 +186,7 @@ func isolate(_ log.Component, _ config.Component, cliParams *cliParams) error { urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfig.Datadog.GetInt("cluster_agent.cmd_port"), cliParams.checkID) // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } diff --git a/pkg/cli/subcommands/config/command.go b/pkg/cli/subcommands/config/command.go index 1d44c7e284373e..fc463e2bad59c6 100644 --- a/pkg/cli/subcommands/config/command.go +++ b/pkg/cli/subcommands/config/command.go @@ -100,8 +100,8 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { return cmd } -func 
showRuntimeConfiguration(_ log.Component, _ config.Component, cliParams *cliParams) error { - err := util.SetAuthToken() +func showRuntimeConfiguration(_ log.Component, config config.Component, cliParams *cliParams) error { + err := util.SetAuthToken(config) if err != nil { return err } @@ -121,8 +121,8 @@ func showRuntimeConfiguration(_ log.Component, _ config.Component, cliParams *cl return nil } -func listRuntimeConfigurableValue(_ log.Component, _ config.Component, cliParams *cliParams) error { - err := util.SetAuthToken() +func listRuntimeConfigurableValue(_ log.Component, config config.Component, cliParams *cliParams) error { + err := util.SetAuthToken(config) if err != nil { return err } @@ -147,12 +147,12 @@ func listRuntimeConfigurableValue(_ log.Component, _ config.Component, cliParams return nil } -func setConfigValue(_ log.Component, _ config.Component, cliParams *cliParams) error { +func setConfigValue(_ log.Component, config config.Component, cliParams *cliParams) error { if len(cliParams.args) != 2 { return fmt.Errorf("exactly two parameters are required: the setting name and its value") } - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } @@ -176,12 +176,12 @@ func setConfigValue(_ log.Component, _ config.Component, cliParams *cliParams) e return nil } -func getConfigValue(_ log.Component, _ config.Component, cliParams *cliParams) error { +func getConfigValue(_ log.Component, config config.Component, cliParams *cliParams) error { if len(cliParams.args) != 1 { return fmt.Errorf("a single setting name must be specified") } - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } diff --git a/pkg/cli/subcommands/dcaflare/command.go b/pkg/cli/subcommands/dcaflare/command.go index 9f8773ee8605e6..aaddd9ef72054c 100644 --- a/pkg/cli/subcommands/dcaflare/command.go +++ b/pkg/cli/subcommands/dcaflare/command.go @@ -190,7 +190,7 @@ func run(cliParams *cliParams, diagnoseSenderManager diagnosesendermanager.Compo return nil } - if e = util.SetAuthToken(); e != nil { + if e = util.SetAuthToken(pkgconfig.Datadog); e != nil { return e } diff --git a/pkg/cli/subcommands/health/command.go b/pkg/cli/subcommands/health/command.go index f22bcf860b4a1c..f4ced3ea6e34e7 100644 --- a/pkg/cli/subcommands/health/command.go +++ b/pkg/cli/subcommands/health/command.go @@ -68,7 +68,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { return cmd } -func requestHealth(_ log.Component, _ config.Component, cliParams *cliParams) error { +func requestHealth(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true ipcAddress, err := pkgconfig.GetIPCAddress() @@ -84,7 +84,7 @@ func requestHealth(_ log.Component, _ config.Component, cliParams *cliParams) er } // Set session token - err = util.SetAuthToken() + err = util.SetAuthToken(config) if err != nil { return err } diff --git a/pkg/cli/subcommands/taggerlist/command.go b/pkg/cli/subcommands/taggerlist/command.go index 0d37ce681c28d8..f8bd826a4cb07d 100644 --- a/pkg/cli/subcommands/taggerlist/command.go +++ b/pkg/cli/subcommands/taggerlist/command.go @@ -69,7 +69,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { func taggerList(_ log.Component, config config.Component, _ *cliParams) error { // Set session token - if err := util.SetAuthToken(); err != nil { + if err := util.SetAuthToken(config); err != nil { return err } diff --git 
a/pkg/cli/subcommands/workloadlist/command.go b/pkg/cli/subcommands/workloadlist/command.go index 1d9ab0c7c552fb..1478adb7fe757e 100644 --- a/pkg/cli/subcommands/workloadlist/command.go +++ b/pkg/cli/subcommands/workloadlist/command.go @@ -74,11 +74,11 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { return workloadListCommand } -func workloadList(_ log.Component, _ config.Component, cliParams *cliParams) error { +func workloadList(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return err } diff --git a/pkg/clusteragent/admission/mutate/auto_instrumentation.go b/pkg/clusteragent/admission/mutate/auto_instrumentation.go index ba53735f82086c..05d33f63a39fd1 100644 --- a/pkg/clusteragent/admission/mutate/auto_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/auto_instrumentation.go @@ -614,12 +614,10 @@ func getServiceNameFromPod(pod *corev1.Pod) (string, error) { // when no other config has been provided. func basicConfig() common.LibConfig { return common.LibConfig{ - Tracing: pointer.Ptr(true), - LogInjection: pointer.Ptr(true), - HealthMetrics: pointer.Ptr(true), - RuntimeMetrics: pointer.Ptr(true), - TracingSamplingRate: pointer.Ptr(1.0), - TracingRateLimit: pointer.Ptr(100), + Tracing: pointer.Ptr(true), + LogInjection: pointer.Ptr(true), + HealthMetrics: pointer.Ptr(true), + RuntimeMetrics: pointer.Ptr(true), } } diff --git a/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go index 91d697a67b8826..c19a9c59c5b6fb 100644 --- a/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/auto_instrumentation_test.go @@ -785,14 +785,6 @@ func TestInjectLibInitContainer(t *testing.T) { func expBasicConfig() []corev1.EnvVar { return []corev1.EnvVar{ - { - Name: "DD_TRACE_RATE_LIMIT", - Value: "100", - }, - { - Name: "DD_TRACE_SAMPLE_RATE", - Value: "1.00", - }, { Name: "DD_RUNTIME_METRICS_ENABLED", Value: "true", diff --git a/pkg/config/aliases_darwin.go b/pkg/config/aliases_darwin.go index 50065415c35db2..17dc1cb3a0f760 100644 --- a/pkg/config/aliases_darwin.go +++ b/pkg/config/aliases_darwin.go @@ -11,6 +11,7 @@ import ( // Aliases to setup package const ( + DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin diff --git a/pkg/config/aliases_nix.go b/pkg/config/aliases_nix.go index a753187405d336..4bbce899be6bd3 100644 --- a/pkg/config/aliases_nix.go +++ b/pkg/config/aliases_nix.go @@ -13,6 +13,7 @@ import ( // Aliases to setup package var ( + DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin diff --git a/pkg/config/aliases_windows.go b/pkg/config/aliases_windows.go index eccac48a83b763..cab0f23bfedbd6 100644 --- a/pkg/config/aliases_windows.go +++ b/pkg/config/aliases_windows.go @@ -11,6 +11,7 @@ import ( // Aliases to setup package var ( + DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile DefaultSecurityAgentLogFile = 
pkgconfigsetup.DefaultSecurityAgentLogFile DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin diff --git a/pkg/config/fetcher/from_processes.go b/pkg/config/fetcher/from_processes.go index 0c1fff65e35bb2..703b1a92f35c77 100644 --- a/pkg/config/fetcher/from_processes.go +++ b/pkg/config/fetcher/from_processes.go @@ -18,7 +18,7 @@ import ( // SecurityAgentConfig fetch the configuration from the security-agent process by querying its HTTPS API func SecurityAgentConfig(config config.Reader) (string, error) { - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return "", err } @@ -32,7 +32,7 @@ func SecurityAgentConfig(config config.Reader) (string, error) { // TraceAgentConfig fetch the configuration from the trace-agent process by querying its HTTPS API func TraceAgentConfig(config config.Reader) (string, error) { - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return "", err } @@ -52,7 +52,7 @@ func TraceAgentConfig(config config.Reader) (string, error) { // ProcessAgentConfig fetch the configuration from the process-agent process by querying its HTTPS API func ProcessAgentConfig(config config.Reader, getEntireConfig bool) (string, error) { - err := util.SetAuthToken() + err := util.SetAuthToken(config) if err != nil { return "", err } diff --git a/pkg/config/remote/service/service.go b/pkg/config/remote/service/service.go index 80bebc7cd03805..e48c51b014f171 100644 --- a/pkg/config/remote/service/service.go +++ b/pkg/config/remote/service/service.go @@ -89,8 +89,9 @@ type Service struct { // The number of errors we're currently tracking within the context of our backoff policy backoffErrorCount int - // Handle to stop the services main goroutine - cancel context.CancelFunc + // Channels to stop the services main goroutines + stopOrgPoller chan struct{} + stopConfigPoller chan struct{} clock clock.Clock hostname string @@ -287,6 +288,8 @@ func NewService(cfg model.Reader, apiKey, baseRawURL, hostname string, tags []st }, telemetryReporter: telemetryReporter, agentVersion: agentVersion, + stopOrgPoller: make(chan struct{}), + stopConfigPoller: make(chan struct{}), } for _, opt := range opts { @@ -305,22 +308,23 @@ func newRCBackendOrgUUIDProvider(http api.API) uptane.OrgUUIDProvider { } // Start the remote configuration management service -func (s *Service) Start(ctx context.Context) { - ctx, cancel := context.WithCancel(ctx) - s.cancel = cancel +func (s *Service) Start() { go func() { s.pollOrgStatus() for { select { case <-s.clock.After(orgStatusPollInterval): s.pollOrgStatus() - case <-ctx.Done(): + case <-s.stopOrgPoller: + log.Infof("Stopping Remote Config org status poller") return } } }() go func() { - defer cancel() + defer func() { + close(s.stopOrgPoller) + }() err := s.refresh() if err != nil { @@ -345,7 +349,8 @@ func (s *Service) Start(ctx context.Context) { s.telemetryReporter.IncRateLimit() } close(response) - case <-ctx.Done(): + case <-s.stopConfigPoller: + log.Infof("Stopping Remote Config configuration poller") return } @@ -363,8 +368,8 @@ func (s *Service) Start(ctx context.Context) { // Stop stops the refresh loop and closes the on-disk DB cache func (s *Service) Stop() error { - if s.cancel != nil { - s.cancel() + if s.stopConfigPoller != nil { + close(s.stopConfigPoller) } return s.db.Close() diff --git a/pkg/config/settings/runtime_profiling.go b/pkg/config/settings/runtime_profiling.go index cc27cdf803ed69..30f3c91c1eb390 100644 --- 
a/pkg/config/settings/runtime_profiling.go +++ b/pkg/config/settings/runtime_profiling.go @@ -11,6 +11,7 @@ import ( "github.com/fatih/color" "github.com/DataDog/datadog-agent/pkg/api/util" + "github.com/DataDog/datadog-agent/pkg/config" ) // ProfilingOpts defines the options used for profiling @@ -23,7 +24,7 @@ type ProfilingOpts struct { // ExecWithRuntimeProfilingSettings runs the callback func with the given runtime profiling settings func ExecWithRuntimeProfilingSettings(callback func(), opts ProfilingOpts, settingsClient Client) error { - if err := util.SetAuthToken(); err != nil { + if err := util.SetAuthToken(config.Datadog); err != nil { return fmt.Errorf("unable to set up authentication token: %v", err) } diff --git a/pkg/config/setup/config_darwin.go b/pkg/config/setup/config_darwin.go index 4f9970b97fe4be..8bf5683bfb6a12 100644 --- a/pkg/config/setup/config_darwin.go +++ b/pkg/config/setup/config_darwin.go @@ -10,6 +10,8 @@ const ( defaultAdditionalChecksPath = "/opt/datadog-agent/etc/checks.d" defaultRunPath = "/opt/datadog-agent/run" defaultGuiPort = 5002 + // DefaultUpdaterLogFile is the default updater log file + DefaultUpdaterLogFile = "/opt/datadog-agent/logs/updater.log" // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured DefaultSecurityAgentLogFile = "/opt/datadog-agent/logs/security-agent.log" // DefaultProcessAgentLogFile is the default process-agent log file diff --git a/pkg/config/setup/config_nix.go b/pkg/config/setup/config_nix.go index dd7939629a4ed3..46c7b9b247f575 100644 --- a/pkg/config/setup/config_nix.go +++ b/pkg/config/setup/config_nix.go @@ -28,6 +28,8 @@ const ( defaultConfdPath = "/etc/datadog-agent/conf.d" defaultAdditionalChecksPath = "/etc/datadog-agent/checks.d" defaultGuiPort = -1 + // DefaultUpdaterLogFile is the default updater log file + DefaultUpdaterLogFile = "/var/log/datadog/updater.log" // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured DefaultSecurityAgentLogFile = "/var/log/datadog/security-agent.log" // DefaultProcessAgentLogFile is the default process-agent log file diff --git a/pkg/config/setup/config_windows.go b/pkg/config/setup/config_windows.go index ce061bcb8f8d4b..60a96564802c58 100644 --- a/pkg/config/setup/config_windows.go +++ b/pkg/config/setup/config_windows.go @@ -18,6 +18,8 @@ var ( defaultAdditionalChecksPath = "c:\\programdata\\datadog\\checks.d" defaultRunPath = "c:\\programdata\\datadog\\run" defaultGuiPort = 5002 + // DefaultUpdaterLogFile is the default updater log file + DefaultUpdaterLogFile = "c:\\programdata\\datadog\\logs\\updater.log" // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured DefaultSecurityAgentLogFile = "c:\\programdata\\datadog\\logs\\security-agent.log" // DefaultProcessAgentLogFile is the default process-agent log file diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 16eaa7a1b3ff10..b36d388e591cca 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -314,6 +314,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { // event monitoring cfg.BindEnvAndSetDefault(join(evNS, "process", "enabled"), false, "DD_SYSTEM_PROBE_EVENT_MONITORING_PROCESS_ENABLED") cfg.BindEnvAndSetDefault(join(evNS, "network_process", "enabled"), true, "DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED") + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, 
"enable_all_probes"), false) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_kernel_filters"), true) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "flush_discarder_window"), 3) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "pid_cache_size"), 10000) diff --git a/pkg/diagnose/runner.go b/pkg/diagnose/runner.go index 535e24f24836a5..064d91bdae643b 100644 --- a/pkg/diagnose/runner.go +++ b/pkg/diagnose/runner.go @@ -298,7 +298,7 @@ func requestDiagnosesFromAgentProcess(diagCfg diagnosis.Config) ([]diagnosis.Dia } // Make sure we have a session token (for privileged information) - if err = util.SetAuthToken(); err != nil { + if err = util.SetAuthToken(pkgconfig.Datadog); err != nil { return nil, fmt.Errorf("auth error: %w", err) } diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index acdefa90ff387e..00aa040f807933 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -71,7 +71,7 @@ func CompleteFlare(fb flaretypes.FlareBuilder, senderManager sender.DiagnoseSend getProcessChecks(fb, config.GetProcessAPIAddressPort) } - fb.RegisterFilePerm(security.GetAuthTokenFilepath()) + fb.RegisterFilePerm(security.GetAuthTokenFilepath(config.Datadog)) systemProbeConfigBPFDir := config.SystemProbe.GetString("system_probe_config.bpf_dir") if systemProbeConfigBPFDir != "" { diff --git a/pkg/flare/cluster_checks.go b/pkg/flare/cluster_checks.go index 0d58ae023173d2..d235c7407296a6 100644 --- a/pkg/flare/cluster_checks.go +++ b/pkg/flare/cluster_checks.go @@ -36,7 +36,7 @@ func GetClusterChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config.Datadog) if err != nil { return err } @@ -123,7 +123,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - if err := util.SetAuthToken(); err != nil { + if err := util.SetAuthToken(config.Datadog); err != nil { return err } diff --git a/pkg/flare/config_check.go b/pkg/flare/config_check.go index 9072818324e3c4..2d9265bfde386d 100644 --- a/pkg/flare/config_check.go +++ b/pkg/flare/config_check.go @@ -32,7 +32,7 @@ func GetConfigCheck(w io.Writer, withDebug bool) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config.Datadog) if err != nil { return err } diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index 6db2b87a8b5f7f..52bbb8d59d506c 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -36,7 +36,7 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } // Dump the state - token, err := security.FetchAuthToken() + token, err := security.FetchAuthToken(config.Datadog) if err != nil { return fmt.Errorf("Couldn't get auth token: %v", err) } diff --git a/pkg/gohai/go.mod b/pkg/gohai/go.mod index 6462344a6c5424..fe13b37258ac86 100644 --- a/pkg/gohai/go.mod +++ b/pkg/gohai/go.mod @@ -10,7 +10,7 @@ require ( github.com/moby/sys/mountinfo v0.7.1 github.com/shirou/gopsutil/v3 v3.24.1 github.com/stretchr/testify v1.8.4 - golang.org/x/sys v0.16.0 + golang.org/x/sys v0.17.0 ) require ( diff --git a/pkg/gohai/go.sum b/pkg/gohai/go.sum index b378ea00f53402..d01dc9870640d3 100644 --- a/pkg/gohai/go.sum +++ b/pkg/gohai/go.sum @@ -59,8 +59,9 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/network/events/monitor_test.go b/pkg/network/events/monitor_test.go index 045ac042dda8b6..37b1abe96cd5cd 100644 --- a/pkg/network/events/monitor_test.go +++ b/pkg/network/events/monitor_test.go @@ -40,7 +40,7 @@ func TestEventHandlerWrapperCopy(t *testing.T) { }, }, ContainerContext: &model.ContainerContext{ID: "cid_exec"}, - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, }} evHandler := &eventHandlerWrapper{} _p := evHandler.Copy(ev) @@ -76,7 +76,7 @@ func TestEventHandlerWrapperCopy(t *testing.T) { }, }, ContainerContext: &model.ContainerContext{ID: "cid_fork"}, - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, }} evHandler := &eventHandlerWrapper{} _p := evHandler.Copy(ev) diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index 474ef98fce7d5f..904646ad15ad95 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/obfuscate -go 1.20 +go 1.21 require ( github.com/DataDog/datadog-go/v5 v5.1.1 diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index 8f60e065253ad2..522e7a4a06e1f3 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -24,6 +24,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= diff --git a/pkg/process/metadata/parser/service.go b/pkg/process/metadata/parser/service.go index 7db19c9cb65781..0515a6a6553559 100644 --- a/pkg/process/metadata/parser/service.go +++ b/pkg/process/metadata/parser/service.go @@ -6,12 +6,13 @@ package parser import ( - "golang.org/x/exp/slices" "path/filepath" "runtime" + "slices" "strings" "unicode" + "github.com/Masterminds/semver" "github.com/cihub/seelog" ddconfig "github.com/DataDog/datadog-agent/pkg/config" @@ -23,9 +24,16 @@ import ( type serviceExtractorFn func(args []string) string const ( - javaJarFlag = "-jar" - javaJarExtension = ".jar" - javaApachePrefix = "org.apache." 
+ javaJarFlag = "-jar" + javaJarExtension = ".jar" + javaModuleFlag = "--module" + javaModuleFlagShort = "-m" + javaSnapshotSuffix = "-SNAPSHOT" + javaApachePrefix = "org.apache." +) + +var ( + javaAllowedFlags = []string{javaJarFlag, javaModuleFlag, javaModuleFlagShort} ) // List of binaries that usually have additional process context of whats running @@ -295,7 +303,18 @@ func parseCommandContextJava(args []string) string { if arg = trimColonRight(arg); isRuneLetterAt(arg, 0) { if strings.HasSuffix(arg, javaJarExtension) { - return arg[:len(arg)-len(javaJarExtension)] + jarName := arg[:len(arg)-len(javaJarExtension)] + if !strings.HasSuffix(jarName, javaSnapshotSuffix) { + return jarName + } + jarName = jarName[:len(jarName)-len(javaSnapshotSuffix)] + + if idx := strings.LastIndex(jarName, "-"); idx != -1 { + if _, err := semver.NewVersion(jarName[idx+1:]); err == nil { + return jarName[:idx] + } + } + return jarName } if strings.HasPrefix(arg, javaApachePrefix) { @@ -315,8 +334,8 @@ func parseCommandContextJava(args []string) string { } } - prevArgIsFlag = hasFlagPrefix && !includesAssignment && a != javaJarFlag + prevArgIsFlag = hasFlagPrefix && !includesAssignment && !slices.Contains(javaAllowedFlags, a) } - return "" + return "java" } diff --git a/pkg/process/metadata/parser/service_test.go b/pkg/process/metadata/parser/service_test.go index bfdc56565ac1b9..ff1ae8f5e37519 100644 --- a/pkg/process/metadata/parser/service_test.go +++ b/pkg/process/metadata/parser/service_test.go @@ -110,6 +110,47 @@ func TestExtractServiceMetadata(t *testing.T) { }, expectedServiceTag: "process_context:cassandra", }, + { + name: "java with -m flag", + cmdline: []string{ + "java", "-Des.networkaddress.cache.ttl=60", "-Des.networkaddress.cache.negative.ttl=10", "-Djava.security.manager=allow", "-XX:+AlwaysPreTouch", + "-Xss1m", "-Djava.awt.headless=true", "-Dfile.encoding=UTF-8", "-Djna.nosys=true", "-XX:-OmitStackTraceInFastThrow", "-Dio.netty.noUnsafe=true", + "-Dio.netty.noKeySetOptimization=true", "-Dio.netty.recycler.maxCapacityPerThread=0", "-Dlog4j.shutdownHookEnabled=false", "-Dlog4j2.disable.jmx=true", + "-Dlog4j2.formatMsgNoLookups=true", "-Djava.locale.providers=SPI,COMPAT", "--add-opens=java.base/java.io=org.elasticsearch.preallocate", + "-XX:+UseG1GC", "-Djava.io.tmpdir=/tmp/elasticsearch-11638915669270544049", "-XX:+HeapDumpOnOutOfMemoryError", "-XX:+ExitOnOutOfMemoryError", + "-XX:HeapDumpPath=data", "-XX:ErrorFile=logs/hs_err_pid%p.log", "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m", + "-Des.cgroups.hierarchy.override=/", "-XX:ActiveProcessorCount=1", "-Djava.net.preferIPv4Stack=true", "-XX:-HeapDumpOnOutOfMemoryError", "-Xms786m", "-Xmx786m", + "-XX:MaxDirectMemorySize=412090368", "-XX:G1HeapRegionSize=4m", "-XX:InitiatingHeapOccupancyPercent=30", "-XX:G1ReservePercent=15", "-Des.distribution.type=tar", + "--module-path", "/usr/share/elasticsearch/lib", "--add-modules=jdk.net", "--add-modules=org.elasticsearch.preallocate", "-m", + "org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch", + }, + expectedServiceTag: "process_context:Elasticsearch", + }, + { + name: "java with --module flag", + cmdline: []string{ + "java", "-Des.networkaddress.cache.ttl=60", "-Des.networkaddress.cache.negative.ttl=10", "-Djava.security.manager=allow", "-XX:+AlwaysPreTouch", + "-Xss1m", "-Djava.awt.headless=true", "-Dfile.encoding=UTF-8", "-Djna.nosys=true", "-XX:-OmitStackTraceInFastThrow", "-Dio.netty.noUnsafe=true", + 
"-Dio.netty.noKeySetOptimization=true", "-Dio.netty.recycler.maxCapacityPerThread=0", "-Dlog4j.shutdownHookEnabled=false", "-Dlog4j2.disable.jmx=true", + "-Dlog4j2.formatMsgNoLookups=true", "-Djava.locale.providers=SPI,COMPAT", "--add-opens=java.base/java.io=org.elasticsearch.preallocate", + "-XX:+UseG1GC", "-Djava.io.tmpdir=/tmp/elasticsearch-11638915669270544049", "-XX:+HeapDumpOnOutOfMemoryError", "-XX:+ExitOnOutOfMemoryError", + "-XX:HeapDumpPath=data", "-XX:ErrorFile=logs/hs_err_pid%p.log", "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m", + "-Des.cgroups.hierarchy.override=/", "-XX:ActiveProcessorCount=1", "-Djava.net.preferIPv4Stack=true", "-XX:-HeapDumpOnOutOfMemoryError", "-Xms786m", "-Xmx786m", + "-XX:MaxDirectMemorySize=412090368", "-XX:G1HeapRegionSize=4m", "-XX:InitiatingHeapOccupancyPercent=30", "-XX:G1ReservePercent=15", "-Des.distribution.type=tar", + "--module-path", "/usr/share/elasticsearch/lib", "--add-modules=jdk.net", "--add-modules=org.elasticsearch.preallocate", "--module", + "org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch", + }, + expectedServiceTag: "process_context:Elasticsearch", + }, + { + name: "java with --module flag without main class", + cmdline: []string{ + "java", "-Des.networkaddress.cache.ttl=60", "-Des.networkaddress.cache.negative.ttl=10", "-Djava.security.manager=allow", "-XX:+AlwaysPreTouch", + "--module-path", "/usr/share/elasticsearch/lib", "--add-modules=jdk.net", "--add-modules=org.elasticsearch.preallocate", "--module", + "org.elasticsearch.server", + }, + expectedServiceTag: "process_context:server", + }, { name: "java space in java executable path", cmdline: []string{ @@ -129,6 +170,50 @@ func TestExtractServiceMetadata(t *testing.T) { }, expectedServiceTag: "process_context:myservice", }, + { + name: "java with unknown flags", + cmdline: []string{ + "java", "-Des.networkaddress.cache.ttl=60", "-Des.networkaddress.cache.negative.ttl=10", + "-Djava.security.manager=allow", "-XX:+AlwaysPreTouch", "-Xss1m", + }, + expectedServiceTag: "process_context:java", + }, + { + name: "java jar with snapshot", + cmdline: []string{ + "/usr/lib/jvm/java-1.17.0-openjdk-amd64/bin/java", "-Dsun.misc.URLClassPath.disableJarChecking=true", + "-Xms1024m", "-Xmx1024m", "-Dlogging.config=file:/usr/local/test/etc/logback-spring-datadog.xml", + "-Dlog4j2.formatMsgNoLookups=true", "-javaagent:/opt/datadog-agent/dd-java-agent.jar", + "-Ddd.profiling.enabled=true", "-Ddd.logs.injection=true", "-Ddd.trace.propagation.style.inject=datadog,b3multi", + "-Ddd.rabbitmq.legacy.tracing.enabled=false", "-jar", + "/usr/local/test/app/myservice-core-1.1.15-SNAPSHOT.jar", "--spring.profiles.active=test", + }, + expectedServiceTag: "process_context:myservice-core", + }, + { + name: "java jar with snapshot with another version", + cmdline: []string{ + "/usr/lib/jvm/java-1.17.0-openjdk-amd64/bin/java", "-Dsun.misc.URLClassPath.disableJarChecking=true", + "-Xms1024m", "-Xmx1024m", "-Dlogging.config=file:/usr/local/test/etc/logback-spring-datadog.xml", + "-Dlog4j2.formatMsgNoLookups=true", "-javaagent:/opt/datadog-agent/dd-java-agent.jar", + "-Ddd.profiling.enabled=true", "-Ddd.logs.injection=true", "-Ddd.trace.propagation.style.inject=datadog,b3multi", + "-Ddd.rabbitmq.legacy.tracing.enabled=false", "-jar", + "/usr/local/test/app/myservice-core-1-SNAPSHOT.jar", "--spring.profiles.active=test", + }, + expectedServiceTag: "process_context:myservice-core", + }, + { + name: "java jar with snapshot without version", + 
cmdline: []string{ + "/usr/lib/jvm/java-1.17.0-openjdk-amd64/bin/java", "-Dsun.misc.URLClassPath.disableJarChecking=true", + "-Xms1024m", "-Xmx1024m", "-Dlogging.config=file:/usr/local/test/etc/logback-spring-datadog.xml", + "-Dlog4j2.formatMsgNoLookups=true", "-javaagent:/opt/datadog-agent/dd-java-agent.jar", + "-Ddd.profiling.enabled=true", "-Ddd.logs.injection=true", "-Ddd.trace.propagation.style.inject=datadog,b3multi", + "-Ddd.rabbitmq.legacy.tracing.enabled=false", "-jar", + "/usr/local/test/app/myservice-core-SNAPSHOT.jar", "--spring.profiles.active=test", + }, + expectedServiceTag: "process_context:myservice-core", + }, } for _, tt := range tests { diff --git a/pkg/proto/go.mod b/pkg/proto/go.mod index 334fc22d2bcb19..2337afc117014a 100644 --- a/pkg/proto/go.mod +++ b/pkg/proto/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/proto -go 1.20 +go 1.21 retract v0.46.0-devel diff --git a/pkg/proto/go.sum b/pkg/proto/go.sum index 101ed4db8f69a8..837c5286cd15d2 100644 --- a/pkg/proto/go.sum +++ b/pkg/proto/go.sum @@ -28,6 +28,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/pkg/remoteconfig/state/go.mod b/pkg/remoteconfig/state/go.mod index 495f903e1193b9..d78618539c08f7 100644 --- a/pkg/remoteconfig/state/go.mod +++ b/pkg/remoteconfig/state/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/remoteconfig/state -go 1.20 +go 1.21 require ( github.com/DataDog/go-tuf v1.0.2-0.5.2 diff --git a/pkg/remoteconfig/state/go.sum b/pkg/remoteconfig/state/go.sum index 5c20443851187c..70e4a354917eda 100644 --- a/pkg/remoteconfig/state/go.sum +++ b/pkg/remoteconfig/state/go.sum @@ -3,8 +3,11 @@ github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOA github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -14,8 +17,11 @@ github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xe github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index 6dd7118f665036..cc10b6dcfa99bd 100644 --- a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -16,14 +16,10 @@ import ( "sync" "time" - pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" - "github.com/DataDog/datadog-agent/pkg/security/probe/kfilters" - "github.com/DataDog/datadog-agent/pkg/security/rules/monitor" - "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-go/v5/statsd" - easyjson "github.com/mailru/easyjson" + "github.com/mailru/easyjson" "go.uber.org/atomic" + "golang.org/x/exp/slices" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/security/common" @@ -31,13 +27,18 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/events" "github.com/DataDog/datadog-agent/pkg/security/metrics" sprobe "github.com/DataDog/datadog-agent/pkg/security/probe" + pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" + "github.com/DataDog/datadog-agent/pkg/security/probe/kfilters" "github.com/DataDog/datadog-agent/pkg/security/probe/selftests" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/reporter" + "github.com/DataDog/datadog-agent/pkg/security/rules/monitor" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/serializers" + "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/startstop" "github.com/DataDog/datadog-agent/pkg/version" @@ -56,14 +57,12 @@ type pendingMsg struct { // the runtime security system-probe module and forwards them to Datadog type APIServer struct { api.UnimplementedSecurityModuleServer - msgs chan *api.SecurityEventMessage - directReporter common.RawReporter - activityDumps chan *api.ActivityDumpStreamMessage - expiredEventsLock sync.RWMutex - expiredEvents map[rules.RuleID]*atomic.Int64 - expiredDumps *atomic.Int64 - //nolint:unused // TODO(SEC) Fix unused linter - limiter *events.StdLimiter + msgs chan *api.SecurityEventMessage + directReporter common.RawReporter + activityDumps chan *api.ActivityDumpStreamMessage + expiredEventsLock sync.RWMutex + expiredEvents map[rules.RuleID]*atomic.Int64 + expiredDumps *atomic.Int64 statsdClient statsd.ClientInterface probe *sprobe.Probe queueLock sync.Mutex @@ -179,7 +178,12 @@ func (a *APIServer) start(ctx context.Context) { case now := <-ticker.C: a.dequeue(now, 
func(msg *pendingMsg) { if msg.extTagsCb != nil { - msg.tags = append(msg.tags, msg.extTagsCb()...) + // dedup + for _, tag := range msg.extTagsCb() { + if !slices.Contains(msg.tags, tag) { + msg.tags = append(msg.tags, tag) + } + } } // recopy tags @@ -291,7 +295,7 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() ruleEvent.AgentContext.PolicyVersion = policy.Version } - probeJSON, err := marshalEvent(e) + probeJSON, err := marshalEvent(e, rule.Opts) if err != nil { seclog.Errorf("failed to marshal event: %v", err) return @@ -307,13 +311,21 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() data = append(data, ruleEventJSON[1:]...) seclog.Tracef("Sending event message for rule `%s` to security-agent `%s`", rule.ID, string(data)) + // no retention if there is no ext tags to resolve + retention := a.retention + if extTagsCb == nil { + retention = 0 + } + + // get type tags + container tags if already resolved, see ResolveContainerTags eventTags := e.GetTags() + msg := &pendingMsg{ ruleID: rule.Definition.ID, data: data, extTagsCb: extTagsCb, service: service, - sendAfter: time.Now().Add(a.retention), + sendAfter: time.Now().Add(retention), tags: make([]string, 0, 1+len(rule.Tags)+len(eventTags)+1), } @@ -325,9 +337,9 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() a.enqueue(msg) } -func marshalEvent(event events.Event) ([]byte, error) { +func marshalEvent(event events.Event, opts *eval.Opts) ([]byte, error) { if ev, ok := event.(*model.Event); ok { - return serializers.MarshalEvent(ev) + return serializers.MarshalEvent(ev, opts) } if ev, ok := event.(events.EventMarshaler); ok { diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index 102b885368c99c..5981d31735e595 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -41,6 +41,9 @@ type Config struct { // event monitor/probe parameters ebpf.Config + // EnableAllProbes defines if all probes should be activated regardless of loaded rules (while still respecting config, especially network disabled) + EnableAllProbes bool + // EnableKernelFilters defines if in-kernel filtering should be activated or not EnableKernelFilters bool @@ -141,6 +144,7 @@ func NewConfig() (*Config, error) { c := &Config{ Config: *ebpf.NewConfig(), + EnableAllProbes: getBool("enable_all_probes"), EnableKernelFilters: getBool("enable_kernel_filters"), EnableApprovers: getBool("enable_approvers"), EnableDiscarders: getBool("enable_discarders"), diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 17d7424ee32eef..2b52378c75d392 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -1,5 +1,5 @@ { - "commit": "9052c35ea412c8b09f17eeae30ced89621f2a13c", + "commit": "83bfeb8b2e92f99d739e1e79cfd53936d6cd1b9e", "constants": [ { "binprm_file_offset": 168, @@ -14177,6 +14177,13 @@ "uname_release": "4.14.35-2047.532.3.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.533.3.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", diff --git a/pkg/security/probe/custom_events.go b/pkg/security/probe/custom_events.go index 5556080dfb4f22..aee8fa5568146c 100644 --- a/pkg/security/probe/custom_events.go +++ b/pkg/security/probe/custom_events.go @@ -93,7 +93,7 @@ func 
(a AbnormalEvent) ToJSON() ([]byte, error) { func NewAbnormalEvent(id string, description string, event *model.Event, err error) (*rules.Rule, *events.CustomEvent) { marshalerCtor := func() events.EventMarshaler { evt := AbnormalEvent{ - Event: serializers.NewEventSerializer(event), + Event: serializers.NewEventSerializer(event, nil), Error: err.Error(), } evt.FillCustomEventCommonFields() diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index 9f1ab7ee3c22e8..fa1cdb77f044c5 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -60,7 +60,7 @@ type onDiscarderHandler func(rs *rules.RuleSet, event *model.Event, probe *EBPFP var ( allDiscarderHandlers = make(map[eval.EventType][]onDiscarderHandler) dentryInvalidDiscarder = []string{""} - eventZeroDiscarder = model.NewDefaultEvent() + eventZeroDiscarder = model.NewFakeEvent() ) // InvalidDiscarders exposes list of values that are not discarders diff --git a/pkg/security/probe/discarders_test.go b/pkg/security/probe/discarders_test.go index 423ec2b8d3dca1..f2365e07434106 100644 --- a/pkg/security/probe/discarders_test.go +++ b/pkg/security/probe/discarders_test.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/seclog" ) -func newDefaultEvent() eval.Event { - return model.NewDefaultEvent() +func newFakeEvent() eval.Event { + return model.NewFakeEvent() } func TestIsParentDiscarder(t *testing.T) { @@ -39,49 +39,49 @@ func TestIsParentDiscarder(t *testing.T) { WithEventTypeEnabled(enabled). WithLogger(seclog.DefaultLogger) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/log/*" && unlink.file.path != "/var/log/datadog/system-probe.log"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/log/datadog/system-probe.log", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/log/*" && unlink.file.path != "/var/log/datadog/system-probe.log"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/lib/datadog/system-probe.sock", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/var/log/datadog/system-probe.log"`, `unlink.file.name == "datadog"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/log/datadog/datadog-agent.log", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/log/*" && unlink.file.name =~ ".*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/lib/.runc/1234", 1); is { t.Error("shouldn't be able to find a parent discarder, due to partial evaluation: true && unlink.file.name =~ '.*'") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, 
newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/etc/conf.d/httpd.conf" || unlink.file.name == "conf.d"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/nginx.conf", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/etc/conf.d/httpd.conf" || unlink.file.name == "sys.d"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/sys.d/nginx.conf", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.name == "conf.d"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/nginx.conf", 1); is { @@ -89,77 +89,77 @@ func TestIsParentDiscarder(t *testing.T) { } // field that doesn't exists shouldn't return any discarders - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `rename.file.path == "/etc/conf.d/abc"`) if is, _ := id.isParentPathDiscarder(rs, model.FileRenameEventType, "rename.file.path", "/etc/conf.d/nginx.conf", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `rename.file.path == "/etc/conf.d/abc"`) if is, _ := id.isParentPathDiscarder(rs, model.FileRenameEventType, "rename.file.path", "/etc/nginx/nginx.conf", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/etc/conf.d/*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/sys.d/nginx.conf", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "*/conf.*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/abc", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/etc/conf.d/ab*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/abc", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "*/conf.d/ab*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/abc", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = 
rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "*/conf.d"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/conf.d/abc", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/etc/*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/etc/cron.d/log", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path == "/tmp/passwd"`, `open.file.path == "/tmp/secret"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/runc", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/run/secrets/kubernetes.io/serviceaccount/*/token"`, `open.file.path == "/etc/secret"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/token", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "*/token"`, `open.file.path == "/etc/secret"`) is, err := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/token", 1) @@ -170,35 +170,35 @@ func TestIsParentDiscarder(t *testing.T) { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/tmp/dir/no-approver-*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/dir/a/test", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "*/conf.d/aaa"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/dir/bbb", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/etc/**"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/etc/conf.d/dir/aaa", 1); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) 
kfilters.AddRuleExpr(t, rs, `open.file.path == "/proc/${process.pid}/maps"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/proc/1/maps", 1); is { @@ -206,14 +206,14 @@ func TestIsParentDiscarder(t *testing.T) { } // test basename conflict, a basename based rule matches the parent discarder - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/var/log/datadog/**" && open.file.name == "token"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/test1/test2", 1); !is { t.Error("should be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/var/log/datadog/**" && open.file.name == "test1"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/test1/test2", 1); is { @@ -236,105 +236,105 @@ func TestIsGrandParentDiscarder(t *testing.T) { WithEventTypeEnabled(enabled). WithLogger(seclog.DefaultLogger) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/var/lib/datadog/system-probe.cache"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); !is { t.Error("should be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/tmp/test"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/lib", 2); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/var/run/datadog/system-probe.pid"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/pids/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/lib/datadog/system-probe.cache"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); !is { t.Error("should be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/run/datadog/system-probe.pid"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/pids/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "*/run/datadog/system-probe.pid"`) if is, _ := id.isParentPathDiscarder(rs, 
model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "*/run/datadog/system-probe.pid"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/lib/datadog/system-probe.pid", 2); !is { t.Error("should be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/*/datadog/system-probe.pid"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/lib/datadog/system-probe.pid"`, `unlink.file.name =~ "run"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path =~ "/var/*"`, `unlink.file.name =~ "run"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/var/run/datadog/system-probe.pid", 2); is { t.Error("shouldn't be a grand parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `open.file.path =~ "/tmp/dir/*"`) if is, _ := id.isParentPathDiscarder(rs, model.FileOpenEventType, "open.file.path", "/tmp/dir/a/test", 2); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.name == "dir"`) // + variants if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/tmp/dir/a/test", 2); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/tmp/dir/a"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/tmp", 2); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/tmp"`) if is, _ := id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/tmp/dir/a", 2); is { t.Error("shouldn't be a parent discarder") } - rs = rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs = rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/tmp"`) if is, _ := 
id.isParentPathDiscarder(rs, model.FileUnlinkEventType, "unlink.file.path", "/tmp", 2); is { @@ -370,7 +370,7 @@ func TestIsDiscarderOverride(t *testing.T) { var listener testEventListener - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) rs.AddListener(&listener) kfilters.AddRuleExpr(t, rs, `unlink.file.path == "/var/log/httpd" && process.file.path == "/bin/touch"`) @@ -415,7 +415,7 @@ func BenchmarkParentDiscarder(b *testing.B) { WithEventTypeEnabled(enabled). WithLogger(seclog.DefaultLogger) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, &opts, &evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, &opts, &evalOpts) kfilters.AddRuleExpr(b, rs, `unlink.file.path =~ "/var/log/*" && unlink.file.path != "/var/log/datadog/system-probe.log"`) b.ResetTimer() diff --git a/pkg/security/probe/field_handlers_ebpfless.go b/pkg/security/probe/field_handlers_ebpfless.go index c18f14922b34f9..d9026b6c2deda1 100644 --- a/pkg/security/probe/field_handlers_ebpfless.go +++ b/pkg/security/probe/field_handlers_ebpfless.go @@ -22,10 +22,6 @@ import ( // EBPFLessFieldHandlers defines a field handlers type EBPFLessFieldHandlers struct { - // TODO(safchain) remove this when support for multiple platform with the same build tags is available - // keeping it can be dangerous as it can hide non implemented handlers - model.DefaultFieldHandlers - config *config.Config resolvers *resolvers.EBPFLessResolvers } @@ -186,3 +182,164 @@ func (fh *EBPFLessFieldHandlers) ResolveContainerTags(_ *model.Event, e *model.C func (fh *EBPFLessFieldHandlers) ResolveProcessCreatedAt(_ *model.Event, e *model.Process) int { return int(e.ExecTime.UnixNano()) } + +// ResolveAsync resolves the async flag +func (fh *EBPFLessFieldHandlers) ResolveAsync(ev *model.Event) bool { return ev.Async } + +// ResolveChownGID resolves the ResolveProcessCacheEntry group id of a chown event to a username +func (fh *EBPFLessFieldHandlers) ResolveChownGID(_ *model.Event, e *model.ChownEvent) string { + return e.Group +} + +// ResolveChownUID resolves the ResolveProcessCacheEntry id of a chown event to a username +func (fh *EBPFLessFieldHandlers) ResolveChownUID(_ *model.Event, e *model.ChownEvent) string { + return e.User +} + +// ResolveEventTimestamp resolves the monolitic kernel event timestamp to an absolute time +func (fh *EBPFLessFieldHandlers) ResolveEventTimestamp(_ *model.Event, e *model.BaseEvent) int { + return int(e.TimestampRaw) +} + +// ResolveFileFieldsGroup resolves the group id of the file to a group name +func (fh *EBPFLessFieldHandlers) ResolveFileFieldsGroup(_ *model.Event, e *model.FileFields) string { + return e.Group +} + +// ResolveFileFieldsInUpperLayer resolves whether the file is in an upper layer +func (fh *EBPFLessFieldHandlers) ResolveFileFieldsInUpperLayer(_ *model.Event, e *model.FileFields) bool { + return e.InUpperLayer +} + +// ResolveFileFieldsUser resolves the user id of the file to a username +func (fh *EBPFLessFieldHandlers) ResolveFileFieldsUser(_ *model.Event, e *model.FileFields) string { + return e.User +} + +// ResolveFileFilesystem resolves the filesystem a file resides in +func (fh *EBPFLessFieldHandlers) ResolveFileFilesystem(_ *model.Event, e *model.FileEvent) string { + return e.Filesystem +} + +// ResolveHashesFromEvent resolves the hashes of the requested event +func (fh *EBPFLessFieldHandlers) ResolveHashesFromEvent(_ *model.Event, e *model.FileEvent) []string { + return e.Hashes +} + 
+// ResolveK8SGroups resolves the k8s groups of the event +func (fh *EBPFLessFieldHandlers) ResolveK8SGroups(_ *model.Event, e *model.UserSessionContext) []string { + return e.K8SGroups +} + +// ResolveK8SUID resolves the k8s UID of the event +func (fh *EBPFLessFieldHandlers) ResolveK8SUID(_ *model.Event, e *model.UserSessionContext) string { + return e.K8SUID +} + +// ResolveK8SUsername resolves the k8s username of the event +func (fh *EBPFLessFieldHandlers) ResolveK8SUsername(_ *model.Event, e *model.UserSessionContext) string { + return e.K8SUsername +} + +// ResolveModuleArgs resolves the correct args if the arguments were truncated, if not return module.Args +func (fh *EBPFLessFieldHandlers) ResolveModuleArgs(_ *model.Event, e *model.LoadModuleEvent) string { + return e.Args +} + +// ResolveModuleArgv resolves the unscrubbed args of the module as an array. Use with caution. +func (fh *EBPFLessFieldHandlers) ResolveModuleArgv(_ *model.Event, e *model.LoadModuleEvent) []string { + return e.Argv +} + +// ResolveMountPointPath resolves a mount point path +func (fh *EBPFLessFieldHandlers) ResolveMountPointPath(_ *model.Event, e *model.MountEvent) string { + return e.MountPointPath +} + +// ResolveMountRootPath resolves a mount root path +func (fh *EBPFLessFieldHandlers) ResolveMountRootPath(_ *model.Event, e *model.MountEvent) string { + return e.MountRootPath +} + +// ResolveMountSourcePath resolves a mount source path +func (fh *EBPFLessFieldHandlers) ResolveMountSourcePath(_ *model.Event, e *model.MountEvent) string { + return e.MountSourcePath +} + +// ResolveNetworkDeviceIfName returns the network interface name from the network context +func (fh *EBPFLessFieldHandlers) ResolveNetworkDeviceIfName(_ *model.Event, e *model.NetworkDeviceContext) string { + return e.IfName +} + +// ResolvePackageName resolves the name of the package providing this file +func (fh *EBPFLessFieldHandlers) ResolvePackageName(_ *model.Event, e *model.FileEvent) string { + return e.PkgName +} + +// ResolvePackageSourceVersion resolves the version of the source package of the package providing this file +func (fh *EBPFLessFieldHandlers) ResolvePackageSourceVersion(_ *model.Event, e *model.FileEvent) string { + return e.PkgSrcVersion +} + +// ResolvePackageVersion resolves the version of the package providing this file +func (fh *EBPFLessFieldHandlers) ResolvePackageVersion(_ *model.Event, e *model.FileEvent) string { + return e.PkgVersion +} + +// ResolveRights resolves the rights of a file +func (fh *EBPFLessFieldHandlers) ResolveRights(_ *model.Event, e *model.FileFields) int { + return int(e.Mode) +} + +// ResolveSELinuxBoolName resolves the boolean name of the SELinux event +func (fh *EBPFLessFieldHandlers) ResolveSELinuxBoolName(_ *model.Event, e *model.SELinuxEvent) string { + return e.BoolName +} + +// ResolveSetgidEGroup resolves the effective group of the Setgid event +func (fh *EBPFLessFieldHandlers) ResolveSetgidEGroup(_ *model.Event, e *model.SetgidEvent) string { + return e.EGroup +} + +// ResolveSetgidFSGroup resolves the file-system group of the Setgid event +func (fh *EBPFLessFieldHandlers) ResolveSetgidFSGroup(_ *model.Event, e *model.SetgidEvent) string { + return e.FSGroup +} + +// ResolveSetgidGroup resolves the group of the Setgid event +func (fh *EBPFLessFieldHandlers) ResolveSetgidGroup(_ *model.Event, e *model.SetgidEvent) string { + return e.Group +} + +// ResolveSetuidEUser resolves the effective user of the Setuid event +func (fh *EBPFLessFieldHandlers) ResolveSetuidEUser(_
*model.Event, e *model.SetuidEvent) string { + return e.EUser +} + +// ResolveSetuidFSUser resolves the file-system user of the Setuid event +func (fh *EBPFLessFieldHandlers) ResolveSetuidFSUser(_ *model.Event, e *model.SetuidEvent) string { + return e.FSUser +} + +// ResolveSetuidUser resolves the user of the Setuid event +func (fh *EBPFLessFieldHandlers) ResolveSetuidUser(_ *model.Event, e *model.SetuidEvent) string { + return e.User +} + +// ResolveXAttrName returns the string representation of the extended attribute name +func (fh *EBPFLessFieldHandlers) ResolveXAttrName(_ *model.Event, e *model.SetXAttrEvent) string { + return e.Name +} + +// ResolveXAttrNamespace returns the string representation of the extended attribute namespace +func (fh *EBPFLessFieldHandlers) ResolveXAttrNamespace(_ *model.Event, e *model.SetXAttrEvent) string { + return e.Namespace +} + +// ResolveHashes resolves the hash of the provided file +func (fh *EBPFLessFieldHandlers) ResolveHashes(_ model.EventType, _ *model.Process, _ *model.FileEvent) []string { + return nil +} + +// ResolveUserSessionContext resolves and updates the provided user session context +func (fh *EBPFLessFieldHandlers) ResolveUserSessionContext(_ *model.UserSessionContext) {} diff --git a/pkg/security/probe/field_handlers_windows.go b/pkg/security/probe/field_handlers_windows.go index 60d0c81ce381da..b7183daf13bbbc 100644 --- a/pkg/security/probe/field_handlers_windows.go +++ b/pkg/security/probe/field_handlers_windows.go @@ -16,10 +16,6 @@ import ( // FieldHandlers defines a field handlers type FieldHandlers struct { - // TODO(safchain) remove this when support for multiple platform with the same build tags is available - // keeping it can be dangerous as it can hide non implemented handlers - model.DefaultFieldHandlers - config *config.Config resolvers *resolvers.Resolvers } @@ -98,3 +94,33 @@ func (fh *FieldHandlers) ResolveProcessCmdLineScrubbed(_ *model.Event, e *model. 
func (fh *FieldHandlers) ResolveUser(_ *model.Event, process *model.Process) string { return fh.resolvers.UserGroupResolver.GetUser(process.OwnerSidString) } + +// ResolveContainerCreatedAt resolves the container creation time of the event +func (fh *FieldHandlers) ResolveContainerCreatedAt(_ *model.Event, e *model.ContainerContext) int { + return int(e.CreatedAt) +} + +// ResolveContainerID resolves the container ID of the event +func (fh *FieldHandlers) ResolveContainerID(_ *model.Event, e *model.ContainerContext) string { + return e.ID +} + +// ResolveContainerTags resolves the container tags of the event +func (fh *FieldHandlers) ResolveContainerTags(_ *model.Event, e *model.ContainerContext) []string { + return e.Tags +} + +// ResolveEventTimestamp resolves the monotonic kernel event timestamp to an absolute time +func (fh *FieldHandlers) ResolveEventTimestamp(_ *model.Event, e *model.BaseEvent) int { + return int(e.TimestampRaw) +} + +// ResolveProcessCmdLine resolves the cmd line of the process of the event +func (fh *FieldHandlers) ResolveProcessCmdLine(_ *model.Event, e *model.Process) string { + return e.CmdLine +} + +// ResolveProcessCreatedAt resolves the process creation time of the event +func (fh *FieldHandlers) ResolveProcessCreatedAt(_ *model.Event, e *model.Process) int { + return int(e.CreatedAt) +} diff --git a/pkg/security/probe/kfilters/approvers_test.go b/pkg/security/probe/kfilters/approvers_test.go index 8a727d7ed002e3..3fd6e8c2120b15 100644 --- a/pkg/security/probe/kfilters/approvers_test.go +++ b/pkg/security/probe/kfilters/approvers_test.go @@ -16,8 +16,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -func newDefaultEvent() eval.Event { - return model.NewDefaultEvent() +func newFakeEvent() eval.Event { + return model.NewFakeEvent() } func TestApproverAncestors1(t *testing.T) { @@ -25,7 +25,7 @@ func TestApproverAncestors1(t *testing.T) { ruleOpts, evalOpts := rules.NewEvalOpts(enabled) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) AddRuleExpr(t, rs, `open.file.path == "/etc/passwd" && process.ancestors.file.name == "vipw"`, `open.file.path == "/etc/shadow" && process.ancestors.file.name == "vipw"`) capabilities, exists := allCapabilities["open"] @@ -48,7 +48,7 @@ func TestApproverAncestors2(t *testing.T) { ruleOpts, evalOpts := rules.NewEvalOpts(enabled) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) AddRuleExpr(t, rs, `(open.file.path == "/etc/shadow" || open.file.path == "/etc/gshadow") && process.ancestors.file.path not in ["/usr/bin/dpkg"]`) capabilities, exists := allCapabilities["open"] if !exists { @@ -68,7 +68,7 @@ func TestApproverAncestors3(t *testing.T) { ruleOpts, evalOpts := rules.NewEvalOpts(enabled) - rs := rules.NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) AddRuleExpr(t, rs, `open.file.path =~ "/var/run/secrets/eks.amazonaws.com/serviceaccount/*/token" && process.file.path not in ["/bin/kubectl"]`) capabilities, exists := allCapabilities["open"] if !exists { diff --git a/pkg/security/probe/model_ebpf.go b/pkg/security/probe/model_ebpf.go index 4ead826c0a5443..c817b24b1d9beb 100644 --- a/pkg/security/probe/model_ebpf.go +++ b/pkg/security/probe/model_ebpf.go @@ -39,7 +39,7 @@ func NewEBPFModel(probe *EBPFProbe) 
*model.Model { // NewEBPFEvent returns a new event func NewEBPFEvent(fh *EBPFFieldHandlers) *model.Event { - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.FieldHandlers = fh return event } diff --git a/pkg/security/probe/model_ebpfless.go b/pkg/security/probe/model_ebpfless.go index 27b9e240b8905f..582dd23351ba90 100644 --- a/pkg/security/probe/model_ebpfless.go +++ b/pkg/security/probe/model_ebpfless.go @@ -53,7 +53,7 @@ func NewEBPFLessModel() *model.Model { // NewEBPFLessEvent returns a new event func NewEBPFLessEvent(fh *EBPFLessFieldHandlers) *model.Event { - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.FieldHandlers = fh return event } diff --git a/pkg/security/probe/model_windows.go b/pkg/security/probe/model_windows.go index cb7520cd646911..9108f60007c7df 100644 --- a/pkg/security/probe/model_windows.go +++ b/pkg/security/probe/model_windows.go @@ -31,7 +31,7 @@ func NewWindowsModel(_ *WindowsProbe) *model.Model { // NewWindowsEvent returns a new event func NewWindowsEvent(fh *FieldHandlers) *model.Event { - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.FieldHandlers = fh return event } diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 4b1594de63a4d7..5086e5a7ed72ed 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -394,7 +394,7 @@ func (p *EBPFProbe) AddActivityDumpHandler(handler dump.ActivityDumpHandler) { // DispatchEvent sends an event to the probe event handler func (p *EBPFProbe) DispatchEvent(event *model.Event) { traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event) + eventJSON, err := serializers.MarshalEvent(event, nil) return eventJSON, event.GetEventType(), err }) @@ -443,7 +443,7 @@ func (p *EBPFProbe) GetMonitors() *EBPFMonitors { // EventMarshallerCtor returns the event marshaller ctor func (p *EBPFProbe) EventMarshallerCtor(event *model.Event) func() events.EventMarshaler { return func() events.EventMarshaler { - return serializers.NewEventSerializer(event) + return serializers.NewEventSerializer(event, nil) } } @@ -1118,7 +1118,7 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall // extract probe to activate per the event types for eventType, selectors := range probes.GetSelectorsPerEventType(p.useFentry) { - if (eventType == "*" || slices.Contains(eventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType)) && p.validEventTypeForConfig(eventType) { + if (eventType == "*" || slices.Contains(eventTypes, eventType) || p.isNeededForActivityDump(eventType) || p.isNeededForSecurityProfile(eventType) || p.config.Probe.EnableAllProbes) && p.validEventTypeForConfig(eventType) { activatedProbes = append(activatedProbes, selectors...) 
} } diff --git a/pkg/security/probe/probe_ebpf_test.go b/pkg/security/probe/probe_ebpf_test.go index 132f346b598930..98003e6fd2c3d0 100644 --- a/pkg/security/probe/probe_ebpf_test.go +++ b/pkg/security/probe/probe_ebpf_test.go @@ -42,7 +42,7 @@ func (MockEventHandler) Copy(incomingEvent *model.Event) any { // benchstat old.txt new.txt func BenchmarkSendSpecificEvent(b *testing.B) { eventHandler := MockEventHandler{} - execEvent := model.NewDefaultEvent() + execEvent := model.NewFakeEvent() execEvent.Type = uint32(model.ExecEventType) type fields struct { diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 7622401f1d5193..0b2a27b756b48b 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -277,7 +277,7 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal // DispatchEvent sends an event to the probe event handler func (p *EBPFLessProbe) DispatchEvent(event *model.Event) { traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event) + eventJSON, err := serializers.MarshalEvent(event, nil) return eventJSON, event.GetEventType(), err }) diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index 290e481e1a9a64..5fbd15b4f93f92 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -411,7 +411,7 @@ func (p *WindowsProbe) Start() error { // DispatchEvent sends an event to the probe event handler func (p *WindowsProbe) DispatchEvent(event *model.Event) { traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event) + eventJSON, err := serializers.MarshalEvent(event, nil) return eventJSON, event.GetEventType(), err }) diff --git a/pkg/security/probe/selftests/tester_linux.go b/pkg/security/probe/selftests/tester_linux.go index 7e4de6b8d91891..519feae5c33f0d 100644 --- a/pkg/security/probe/selftests/tester_linux.go +++ b/pkg/security/probe/selftests/tester_linux.go @@ -201,7 +201,7 @@ func (t *SelfTester) IsExpectedEvent(rule *rules.Rule, event eval.Event, _ *prob return true } - s := serializers.NewEventSerializer(ev) + s := serializers.NewEventSerializer(ev, rule.Opts) if s == nil || s.FileEventSerializer == nil { return true } diff --git a/pkg/security/proto/api/api_grpc.pb.go b/pkg/security/proto/api/api_grpc.pb.go index 63caa51cc6bccb..be5a543af02725 100644 --- a/pkg/security/proto/api/api_grpc.pb.go +++ b/pkg/security/proto/api/api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.2 +// - protoc v3.12.4 // source: pkg/security/proto/api/api.proto package api diff --git a/pkg/security/rconfig/policies.go b/pkg/security/rconfig/policies.go index 80763c0de6f3be..c9c00e224bc8fe 100644 --- a/pkg/security/rconfig/policies.go +++ b/pkg/security/rconfig/policies.go @@ -56,7 +56,7 @@ func NewRCPolicyProvider() (*RCPolicyProvider, error) { return nil, fmt.Errorf("failed to get ipc address: %w", err) } - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), security.FetchAuthToken, + c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog) }, client.WithAgent(agentName, agentVersion.String()), client.WithProducts([]data.Product{data.ProductCWSDD, data.ProductCWSCustom}), client.WithPollInterval(securityAgentRCPollInterval), @@ -136,10 +136,9 @@ func (r *RCPolicyProvider) LoadPolicies(macroFilters []rules.MacroFilter, ruleFi policy, err := rules.LoadPolicy(id, rules.PolicyProviderTypeRC, reader, macroFilters, ruleFilters) if err != nil { errs = multierror.Append(errs, err) - } else { - normalize(policy) - policies = append(policies, policy) } + normalize(policy) + policies = append(policies, policy) } for _, c := range r.lastDefaults { diff --git a/pkg/security/rconfig/profiles.go b/pkg/security/rconfig/profiles.go index 20851b4564ff1b..001814db15921c 100644 --- a/pkg/security/rconfig/profiles.go +++ b/pkg/security/rconfig/profiles.go @@ -149,7 +149,7 @@ func NewRCProfileProvider() (*RCProfileProvider, error) { return nil, fmt.Errorf("failed to get ipc address: %w", err) } - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), security.FetchAuthToken, + c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog) }, client.WithAgent(agentName, agentVersion.String()), client.WithProducts([]data.Product{data.ProductCWSProfile}), client.WithPollInterval(securityAgentRCPollInterval)) diff --git a/pkg/security/resolvers/hash/resolver_test.go b/pkg/security/resolvers/hash/resolver_test.go index e288eee3087574..bcd089855337c9 100644 --- a/pkg/security/resolvers/hash/resolver_test.go +++ b/pkg/security/resolvers/hash/resolver_test.go @@ -58,7 +58,7 @@ func TestResolver_ComputeHashes(t *testing.T) { args: args{ event: &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: &model.ProcessContext{ Process: model.Process{ @@ -95,7 +95,7 @@ func TestResolver_ComputeHashes(t *testing.T) { args: args{ event: &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: &model.ProcessContext{ Process: model.Process{ @@ -128,7 +128,7 @@ func TestResolver_ComputeHashes(t *testing.T) { args: args{ event: &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: &model.ProcessContext{ Process: model.Process{ @@ -165,7 +165,7 @@ func TestResolver_ComputeHashes(t *testing.T) { args: args{ event: &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: 
&model.ProcessContext{ Process: model.Process{ @@ -198,7 +198,7 @@ func TestResolver_ComputeHashes(t *testing.T) { args: args{ event: &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: &model.ProcessContext{ Process: model.Process{ @@ -500,7 +500,7 @@ func BenchmarkHashFunctions(b *testing.B) { for i := 0; i < caseB.N; i++ { got := resolver.ComputeHashesFromEvent(&model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, Type: uint32(model.ExecEventType), ProcessContext: &model.ProcessContext{ Process: model.Process{ diff --git a/pkg/security/resolvers/process/resolver_test.go b/pkg/security/resolvers/process/resolver_test.go index 523cdc30555e87..3b92aabb061aa8 100644 --- a/pkg/security/resolvers/process/resolver_test.go +++ b/pkg/security/resolvers/process/resolver_test.go @@ -621,7 +621,7 @@ func TestExecLostExec(t *testing.T) { assert.True(t, child2.IsParentMissing) } -func TestIsExecChildRuntime(t *testing.T) { +func TestIsExecExecRuntime(t *testing.T) { resolver, err := NewEBPFResolver(nil, nil, &statsd.NoOpClient{}, nil, nil, nil, nil, nil, nil, nil, NewResolverOpts()) if err != nil { t.Fatal(err) @@ -661,16 +661,16 @@ func TestIsExecChildRuntime(t *testing.T) { child3.PPid = child2.Pid resolver.AddExecEntry(child3, 0) - assert.False(t, parent.IsExecChild) + assert.False(t, parent.IsExecExec) assert.False(t, parent.IsThread) // root node, no fork - assert.False(t, child.IsExecChild) + assert.False(t, child.IsExecExec) assert.True(t, child.IsThread) - assert.False(t, child2.IsExecChild) + assert.False(t, child2.IsExecExec) assert.False(t, child2.IsThread) - assert.True(t, child3.IsExecChild) + assert.True(t, child3.IsExecExec) assert.False(t, child3.IsThread) child4 := resolver.NewProcessCacheEntry(model.PIDContext{Pid: 2, Tid: 2}) @@ -678,11 +678,11 @@ func TestIsExecChildRuntime(t *testing.T) { child4.PPid = child3.Pid resolver.AddExecEntry(child4, 0) - assert.True(t, child3.IsExecChild) + assert.True(t, child3.IsExecExec) assert.False(t, child3.IsThread) } -func TestIsExecChildSnapshot(t *testing.T) { +func TestIsExecExecSnapshot(t *testing.T) { resolver, err := NewEBPFResolver(nil, nil, &statsd.NoOpClient{}, nil, nil, nil, nil, nil, nil, nil, NewResolverOpts()) if err != nil { t.Fatal(err) @@ -707,10 +707,10 @@ func TestIsExecChildSnapshot(t *testing.T) { resolver.setAncestor(child) resolver.insertEntry(child, nil, model.ProcessCacheEntryFromSnapshot) - assert.False(t, parent.IsExecChild) + assert.False(t, parent.IsExecExec) assert.True(t, parent.IsThread) // root node, no fork - assert.False(t, child.IsExecChild) + assert.False(t, child.IsExecExec) assert.True(t, child.IsThread) // parent @@ -722,7 +722,7 @@ func TestIsExecChildSnapshot(t *testing.T) { child2.PPid = child.Pid resolver.AddExecEntry(child2, 0) - assert.False(t, child2.IsExecChild) + assert.False(t, child2.IsExecExec) assert.False(t, child2.IsThread) child3 := resolver.NewProcessCacheEntry(model.PIDContext{Pid: 2, Tid: 2}) @@ -730,6 +730,6 @@ func TestIsExecChildSnapshot(t *testing.T) { child3.PPid = child2.Pid resolver.AddExecEntry(child3, 0) - assert.True(t, child3.IsExecChild) + assert.True(t, child3.IsExecExec) assert.False(t, child3.IsThread) } diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 30eb0b6d18152f..a0561f2bff699a 100644 --- a/pkg/security/rules/engine.go +++ 
b/pkg/security/rules/engine.go @@ -39,6 +39,8 @@ const ( ProbeEvaluationRuleSetTagValue = "probe_evaluation" // ThreatScoreRuleSetTagValue defines the threat-score rule-set tag value ThreatScoreRuleSetTagValue = "threat_score" + // TagMaxResolutionDelay maximum tag resolution delay + TagMaxResolutionDelay = 5 * time.Second ) // RuleEngine defines a rule engine @@ -427,9 +429,19 @@ func (e *RuleEngine) RuleMatch(rule *rules.Rule, event eval.Event) bool { // needs to be resolved here, outside of the callback as using process tree // which can be modified during queuing service := e.probe.GetService(ev) - containerID := ev.ContainerContext.ID - extTagsCb := func() []string { - return e.probe.GetEventTags(containerID) + + var extTagsCb func() []string + + if ev.ContainerContext.ID != "" { + // copy the container ID here to avoid later data race + containerID := ev.ContainerContext.ID + + // the container tags might not be resolved yet + if time.Unix(0, int64(ev.ContainerContext.CreatedAt)).Add(TagMaxResolutionDelay).After(time.Now()) { + extTagsCb = func() []string { + return e.probe.GetEventTags(containerID) + } + } } e.eventSender.SendEvent(rule, ev, extTagsCb, service) diff --git a/pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl b/pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl index 11ead370e5ca2e..8e90a647b70c65 100644 --- a/pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl +++ b/pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl @@ -29,7 +29,7 @@ func (ev *Event) resolveFields(forADs bool) { {{- if $Field.GettersOnly }} {{continue}} {{end}} - + {{- if and (eq $Field.Event "*") }} {{ $resolver := $Field | GetFieldHandler $.AllFields }} {{ if and (ne $resolver "") (not (hasKey $uniqueResolvers $resolver)) }} @@ -104,9 +104,9 @@ type FieldHandlers interface { ExtraFieldHandlers } -type DefaultFieldHandlers struct {} +type FakeFieldHandlers struct {} {{$Handlers := .Fields | GetHandlers}} {{range $Proto, $Impl := $Handlers}} - func (dfh *DefaultFieldHandlers) {{$Proto}} {{$Impl}} + func (dfh *FakeFieldHandlers) {{$Proto}} {{$Impl}} {{end}} diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 91cc81ba39214c..a99f7c43cf71d5 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -1028,125 +1028,105 @@ type FieldHandlers interface { // custom handlers not tied to any fields ExtraFieldHandlers } -type DefaultFieldHandlers struct{} +type FakeFieldHandlers struct{} -func (dfh *DefaultFieldHandlers) ResolveAsync(ev *Event) bool { return ev.Async } -func (dfh *DefaultFieldHandlers) ResolveChownGID(ev *Event, e *ChownEvent) string { return e.Group } -func (dfh *DefaultFieldHandlers) ResolveChownUID(ev *Event, e *ChownEvent) string { return e.User } -func (dfh *DefaultFieldHandlers) ResolveContainerCreatedAt(ev *Event, e *ContainerContext) int { +func (dfh *FakeFieldHandlers) ResolveAsync(ev *Event) bool { return ev.Async } +func (dfh *FakeFieldHandlers) ResolveChownGID(ev *Event, e *ChownEvent) string { return e.Group } +func (dfh *FakeFieldHandlers) ResolveChownUID(ev *Event, e *ChownEvent) string { return e.User } +func (dfh *FakeFieldHandlers) ResolveContainerCreatedAt(ev *Event, e *ContainerContext) int { return int(e.CreatedAt) } -func (dfh *DefaultFieldHandlers) ResolveContainerID(ev *Event, e *ContainerContext) string { - return e.ID -} -func (dfh *DefaultFieldHandlers) ResolveContainerTags(ev 
*Event, e *ContainerContext) []string { +func (dfh *FakeFieldHandlers) ResolveContainerID(ev *Event, e *ContainerContext) string { return e.ID } +func (dfh *FakeFieldHandlers) ResolveContainerTags(ev *Event, e *ContainerContext) []string { return e.Tags } -func (dfh *DefaultFieldHandlers) ResolveEventTime(ev *Event, e *BaseEvent) time.Time { - return e.Timestamp -} -func (dfh *DefaultFieldHandlers) ResolveEventTimestamp(ev *Event, e *BaseEvent) int { +func (dfh *FakeFieldHandlers) ResolveEventTime(ev *Event, e *BaseEvent) time.Time { return e.Timestamp } +func (dfh *FakeFieldHandlers) ResolveEventTimestamp(ev *Event, e *BaseEvent) int { return int(e.TimestampRaw) } -func (dfh *DefaultFieldHandlers) ResolveFileBasename(ev *Event, e *FileEvent) string { +func (dfh *FakeFieldHandlers) ResolveFileBasename(ev *Event, e *FileEvent) string { return e.BasenameStr } -func (dfh *DefaultFieldHandlers) ResolveFileFieldsGroup(ev *Event, e *FileFields) string { - return e.Group -} -func (dfh *DefaultFieldHandlers) ResolveFileFieldsInUpperLayer(ev *Event, e *FileFields) bool { +func (dfh *FakeFieldHandlers) ResolveFileFieldsGroup(ev *Event, e *FileFields) string { return e.Group } +func (dfh *FakeFieldHandlers) ResolveFileFieldsInUpperLayer(ev *Event, e *FileFields) bool { return e.InUpperLayer } -func (dfh *DefaultFieldHandlers) ResolveFileFieldsUser(ev *Event, e *FileFields) string { - return e.User -} -func (dfh *DefaultFieldHandlers) ResolveFileFilesystem(ev *Event, e *FileEvent) string { +func (dfh *FakeFieldHandlers) ResolveFileFieldsUser(ev *Event, e *FileFields) string { return e.User } +func (dfh *FakeFieldHandlers) ResolveFileFilesystem(ev *Event, e *FileEvent) string { return e.Filesystem } -func (dfh *DefaultFieldHandlers) ResolveFilePath(ev *Event, e *FileEvent) string { - return e.PathnameStr -} -func (dfh *DefaultFieldHandlers) ResolveHashesFromEvent(ev *Event, e *FileEvent) []string { +func (dfh *FakeFieldHandlers) ResolveFilePath(ev *Event, e *FileEvent) string { return e.PathnameStr } +func (dfh *FakeFieldHandlers) ResolveHashesFromEvent(ev *Event, e *FileEvent) []string { return e.Hashes } -func (dfh *DefaultFieldHandlers) ResolveK8SGroups(ev *Event, e *UserSessionContext) []string { +func (dfh *FakeFieldHandlers) ResolveK8SGroups(ev *Event, e *UserSessionContext) []string { return e.K8SGroups } -func (dfh *DefaultFieldHandlers) ResolveK8SUID(ev *Event, e *UserSessionContext) string { - return e.K8SUID -} -func (dfh *DefaultFieldHandlers) ResolveK8SUsername(ev *Event, e *UserSessionContext) string { +func (dfh *FakeFieldHandlers) ResolveK8SUID(ev *Event, e *UserSessionContext) string { return e.K8SUID } +func (dfh *FakeFieldHandlers) ResolveK8SUsername(ev *Event, e *UserSessionContext) string { return e.K8SUsername } -func (dfh *DefaultFieldHandlers) ResolveModuleArgs(ev *Event, e *LoadModuleEvent) string { - return e.Args -} -func (dfh *DefaultFieldHandlers) ResolveModuleArgv(ev *Event, e *LoadModuleEvent) []string { +func (dfh *FakeFieldHandlers) ResolveModuleArgs(ev *Event, e *LoadModuleEvent) string { return e.Args } +func (dfh *FakeFieldHandlers) ResolveModuleArgv(ev *Event, e *LoadModuleEvent) []string { return e.Argv } -func (dfh *DefaultFieldHandlers) ResolveMountPointPath(ev *Event, e *MountEvent) string { +func (dfh *FakeFieldHandlers) ResolveMountPointPath(ev *Event, e *MountEvent) string { return e.MountPointPath } -func (dfh *DefaultFieldHandlers) ResolveMountRootPath(ev *Event, e *MountEvent) string { +func (dfh *FakeFieldHandlers) ResolveMountRootPath(ev *Event, e 
*MountEvent) string { return e.MountRootPath } -func (dfh *DefaultFieldHandlers) ResolveMountSourcePath(ev *Event, e *MountEvent) string { +func (dfh *FakeFieldHandlers) ResolveMountSourcePath(ev *Event, e *MountEvent) string { return e.MountSourcePath } -func (dfh *DefaultFieldHandlers) ResolveNetworkDeviceIfName(ev *Event, e *NetworkDeviceContext) string { +func (dfh *FakeFieldHandlers) ResolveNetworkDeviceIfName(ev *Event, e *NetworkDeviceContext) string { return e.IfName } -func (dfh *DefaultFieldHandlers) ResolvePackageName(ev *Event, e *FileEvent) string { return e.PkgName } -func (dfh *DefaultFieldHandlers) ResolvePackageSourceVersion(ev *Event, e *FileEvent) string { +func (dfh *FakeFieldHandlers) ResolvePackageName(ev *Event, e *FileEvent) string { return e.PkgName } +func (dfh *FakeFieldHandlers) ResolvePackageSourceVersion(ev *Event, e *FileEvent) string { return e.PkgSrcVersion } -func (dfh *DefaultFieldHandlers) ResolvePackageVersion(ev *Event, e *FileEvent) string { +func (dfh *FakeFieldHandlers) ResolvePackageVersion(ev *Event, e *FileEvent) string { return e.PkgVersion } -func (dfh *DefaultFieldHandlers) ResolveProcessArgs(ev *Event, e *Process) string { return e.Args } -func (dfh *DefaultFieldHandlers) ResolveProcessArgsFlags(ev *Event, e *Process) []string { - return e.Argv -} -func (dfh *DefaultFieldHandlers) ResolveProcessArgsOptions(ev *Event, e *Process) []string { +func (dfh *FakeFieldHandlers) ResolveProcessArgs(ev *Event, e *Process) string { return e.Args } +func (dfh *FakeFieldHandlers) ResolveProcessArgsFlags(ev *Event, e *Process) []string { return e.Argv } +func (dfh *FakeFieldHandlers) ResolveProcessArgsOptions(ev *Event, e *Process) []string { return e.Argv } -func (dfh *DefaultFieldHandlers) ResolveProcessArgsScrubbed(ev *Event, e *Process) string { +func (dfh *FakeFieldHandlers) ResolveProcessArgsScrubbed(ev *Event, e *Process) string { return e.ArgsScrubbed } -func (dfh *DefaultFieldHandlers) ResolveProcessArgsTruncated(ev *Event, e *Process) bool { +func (dfh *FakeFieldHandlers) ResolveProcessArgsTruncated(ev *Event, e *Process) bool { return e.ArgsTruncated } -func (dfh *DefaultFieldHandlers) ResolveProcessArgv(ev *Event, e *Process) []string { return e.Argv } -func (dfh *DefaultFieldHandlers) ResolveProcessArgv0(ev *Event, e *Process) string { return e.Argv0 } -func (dfh *DefaultFieldHandlers) ResolveProcessArgvScrubbed(ev *Event, e *Process) []string { +func (dfh *FakeFieldHandlers) ResolveProcessArgv(ev *Event, e *Process) []string { return e.Argv } +func (dfh *FakeFieldHandlers) ResolveProcessArgv0(ev *Event, e *Process) string { return e.Argv0 } +func (dfh *FakeFieldHandlers) ResolveProcessArgvScrubbed(ev *Event, e *Process) []string { return e.ArgvScrubbed } -func (dfh *DefaultFieldHandlers) ResolveProcessCreatedAt(ev *Event, e *Process) int { +func (dfh *FakeFieldHandlers) ResolveProcessCreatedAt(ev *Event, e *Process) int { return int(e.CreatedAt) } -func (dfh *DefaultFieldHandlers) ResolveProcessEnvp(ev *Event, e *Process) []string { return e.Envp } -func (dfh *DefaultFieldHandlers) ResolveProcessEnvs(ev *Event, e *Process) []string { return e.Envs } -func (dfh *DefaultFieldHandlers) ResolveProcessEnvsTruncated(ev *Event, e *Process) bool { +func (dfh *FakeFieldHandlers) ResolveProcessEnvp(ev *Event, e *Process) []string { return e.Envp } +func (dfh *FakeFieldHandlers) ResolveProcessEnvs(ev *Event, e *Process) []string { return e.Envs } +func (dfh *FakeFieldHandlers) ResolveProcessEnvsTruncated(ev *Event, e *Process) bool { return 
e.EnvsTruncated } -func (dfh *DefaultFieldHandlers) ResolveRights(ev *Event, e *FileFields) int { return int(e.Mode) } -func (dfh *DefaultFieldHandlers) ResolveSELinuxBoolName(ev *Event, e *SELinuxEvent) string { +func (dfh *FakeFieldHandlers) ResolveRights(ev *Event, e *FileFields) int { return int(e.Mode) } +func (dfh *FakeFieldHandlers) ResolveSELinuxBoolName(ev *Event, e *SELinuxEvent) string { return e.BoolName } -func (dfh *DefaultFieldHandlers) ResolveService(ev *Event, e *BaseEvent) string { return e.Service } -func (dfh *DefaultFieldHandlers) ResolveSetgidEGroup(ev *Event, e *SetgidEvent) string { - return e.EGroup -} -func (dfh *DefaultFieldHandlers) ResolveSetgidFSGroup(ev *Event, e *SetgidEvent) string { +func (dfh *FakeFieldHandlers) ResolveService(ev *Event, e *BaseEvent) string { return e.Service } +func (dfh *FakeFieldHandlers) ResolveSetgidEGroup(ev *Event, e *SetgidEvent) string { return e.EGroup } +func (dfh *FakeFieldHandlers) ResolveSetgidFSGroup(ev *Event, e *SetgidEvent) string { return e.FSGroup } -func (dfh *DefaultFieldHandlers) ResolveSetgidGroup(ev *Event, e *SetgidEvent) string { return e.Group } -func (dfh *DefaultFieldHandlers) ResolveSetuidEUser(ev *Event, e *SetuidEvent) string { return e.EUser } -func (dfh *DefaultFieldHandlers) ResolveSetuidFSUser(ev *Event, e *SetuidEvent) string { - return e.FSUser -} -func (dfh *DefaultFieldHandlers) ResolveSetuidUser(ev *Event, e *SetuidEvent) string { return e.User } -func (dfh *DefaultFieldHandlers) ResolveXAttrName(ev *Event, e *SetXAttrEvent) string { return e.Name } -func (dfh *DefaultFieldHandlers) ResolveXAttrNamespace(ev *Event, e *SetXAttrEvent) string { +func (dfh *FakeFieldHandlers) ResolveSetgidGroup(ev *Event, e *SetgidEvent) string { return e.Group } +func (dfh *FakeFieldHandlers) ResolveSetuidEUser(ev *Event, e *SetuidEvent) string { return e.EUser } +func (dfh *FakeFieldHandlers) ResolveSetuidFSUser(ev *Event, e *SetuidEvent) string { return e.FSUser } +func (dfh *FakeFieldHandlers) ResolveSetuidUser(ev *Event, e *SetuidEvent) string { return e.User } +func (dfh *FakeFieldHandlers) ResolveXAttrName(ev *Event, e *SetXAttrEvent) string { return e.Name } +func (dfh *FakeFieldHandlers) ResolveXAttrNamespace(ev *Event, e *SetXAttrEvent) string { return e.Namespace } diff --git a/pkg/security/secl/model/field_handlers_windows.go b/pkg/security/secl/model/field_handlers_windows.go index cc89f54be9ee06..001b4c7f60c224 100644 --- a/pkg/security/secl/model/field_handlers_windows.go +++ b/pkg/security/secl/model/field_handlers_windows.go @@ -99,39 +99,31 @@ type FieldHandlers interface { // custom handlers not tied to any fields ExtraFieldHandlers } -type DefaultFieldHandlers struct{} +type FakeFieldHandlers struct{} -func (dfh *DefaultFieldHandlers) ResolveContainerCreatedAt(ev *Event, e *ContainerContext) int { +func (dfh *FakeFieldHandlers) ResolveContainerCreatedAt(ev *Event, e *ContainerContext) int { return int(e.CreatedAt) } -func (dfh *DefaultFieldHandlers) ResolveContainerID(ev *Event, e *ContainerContext) string { - return e.ID -} -func (dfh *DefaultFieldHandlers) ResolveContainerTags(ev *Event, e *ContainerContext) []string { +func (dfh *FakeFieldHandlers) ResolveContainerID(ev *Event, e *ContainerContext) string { return e.ID } +func (dfh *FakeFieldHandlers) ResolveContainerTags(ev *Event, e *ContainerContext) []string { return e.Tags } -func (dfh *DefaultFieldHandlers) ResolveEventTime(ev *Event, e *BaseEvent) time.Time { - return e.Timestamp -} -func (dfh *DefaultFieldHandlers) 
ResolveEventTimestamp(ev *Event, e *BaseEvent) int { +func (dfh *FakeFieldHandlers) ResolveEventTime(ev *Event, e *BaseEvent) time.Time { return e.Timestamp } +func (dfh *FakeFieldHandlers) ResolveEventTimestamp(ev *Event, e *BaseEvent) int { return int(e.TimestampRaw) } -func (dfh *DefaultFieldHandlers) ResolveFileBasename(ev *Event, e *FileEvent) string { +func (dfh *FakeFieldHandlers) ResolveFileBasename(ev *Event, e *FileEvent) string { return e.BasenameStr } -func (dfh *DefaultFieldHandlers) ResolveFilePath(ev *Event, e *FileEvent) string { - return e.PathnameStr -} -func (dfh *DefaultFieldHandlers) ResolveProcessCmdLine(ev *Event, e *Process) string { - return e.CmdLine -} -func (dfh *DefaultFieldHandlers) ResolveProcessCmdLineScrubbed(ev *Event, e *Process) string { +func (dfh *FakeFieldHandlers) ResolveFilePath(ev *Event, e *FileEvent) string { return e.PathnameStr } +func (dfh *FakeFieldHandlers) ResolveProcessCmdLine(ev *Event, e *Process) string { return e.CmdLine } +func (dfh *FakeFieldHandlers) ResolveProcessCmdLineScrubbed(ev *Event, e *Process) string { return e.CmdLineScrubbed } -func (dfh *DefaultFieldHandlers) ResolveProcessCreatedAt(ev *Event, e *Process) int { +func (dfh *FakeFieldHandlers) ResolveProcessCreatedAt(ev *Event, e *Process) int { return int(e.CreatedAt) } -func (dfh *DefaultFieldHandlers) ResolveProcessEnvp(ev *Event, e *Process) []string { return e.Envp } -func (dfh *DefaultFieldHandlers) ResolveProcessEnvs(ev *Event, e *Process) []string { return e.Envs } -func (dfh *DefaultFieldHandlers) ResolveService(ev *Event, e *BaseEvent) string { return e.Service } -func (dfh *DefaultFieldHandlers) ResolveUser(ev *Event, e *Process) string { return e.User } +func (dfh *FakeFieldHandlers) ResolveProcessEnvp(ev *Event, e *Process) []string { return e.Envp } +func (dfh *FakeFieldHandlers) ResolveProcessEnvs(ev *Event, e *Process) []string { return e.Envs } +func (dfh *FakeFieldHandlers) ResolveService(ev *Event, e *BaseEvent) string { return e.Service } +func (dfh *FakeFieldHandlers) ResolveUser(ev *Event, e *Process) string { return e.User } diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index e3ad34f8dad2d6..20f2a115cb2819 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -42,7 +42,7 @@ func (m *Model) NewDefaultEventWithType(kind EventType) eval.Event { return &Event{ BaseEvent: BaseEvent{ Type: uint32(kind), - FieldHandlers: &DefaultFieldHandlers{}, + FieldHandlers: &FakeFieldHandlers{}, ContainerContext: &ContainerContext{}, }, } @@ -180,11 +180,11 @@ func initMember(member reflect.Value, deja map[string]bool) { } } -// NewDefaultEvent returns a new event using the default field handlers -func NewDefaultEvent() *Event { +// NewFakeEvent returns a new event using the default field handlers +func NewFakeEvent() *Event { return &Event{ BaseEvent: BaseEvent{ - FieldHandlers: &DefaultFieldHandlers{}, + FieldHandlers: &FakeFieldHandlers{}, ContainerContext: &ContainerContext{}, }, } @@ -555,11 +555,11 @@ type BaseExtraFieldHandlers interface { } // ResolveProcessCacheEntry stub implementation -func (dfh *DefaultFieldHandlers) ResolveProcessCacheEntry(_ *Event) (*ProcessCacheEntry, bool) { +func (dfh *FakeFieldHandlers) ResolveProcessCacheEntry(_ *Event) (*ProcessCacheEntry, bool) { return nil, false } // ResolveContainerContext stub implementation -func (dfh *DefaultFieldHandlers) ResolveContainerContext(_ *Event) (*ContainerContext, bool) { +func (dfh *FakeFieldHandlers) ResolveContainerContext(_ 
*Event) (*ContainerContext, bool) { return nil, false } diff --git a/pkg/security/secl/model/model_helpers_unix.go b/pkg/security/secl/model/model_helpers_unix.go index 60e260c6d5a5b2..51f7da762dadfa 100644 --- a/pkg/security/secl/model/model_helpers_unix.go +++ b/pkg/security/secl/model/model_helpers_unix.go @@ -346,12 +346,12 @@ func (pl *PathLeaf) MarshalBinary() ([]byte, error) { } // ResolveHashes resolves the hash of the provided file -func (dfh *DefaultFieldHandlers) ResolveHashes(_ EventType, _ *Process, _ *FileEvent) []string { +func (dfh *FakeFieldHandlers) ResolveHashes(_ EventType, _ *Process, _ *FileEvent) []string { return nil } // ResolveUserSessionContext resolves and updates the provided user session context -func (dfh *DefaultFieldHandlers) ResolveUserSessionContext(_ *UserSessionContext) {} +func (dfh *FakeFieldHandlers) ResolveUserSessionContext(_ *UserSessionContext) {} // SELinuxEventKind represents the event kind for SELinux events type SELinuxEventKind uint32 diff --git a/pkg/security/secl/model/model_test.go b/pkg/security/secl/model/model_test.go index 93952087150ddc..0d96280ef71785 100644 --- a/pkg/security/secl/model/model_test.go +++ b/pkg/security/secl/model/model_test.go @@ -121,7 +121,7 @@ func TestSetFieldValue(t *testing.T) { var readOnlyError *eval.ErrFieldReadOnly var fieldNotSupportedError *eval.ErrNotSupported - event := NewDefaultEvent() + event := NewFakeEvent() for _, field := range event.GetFields() { kind, err := event.GetFieldType(field) if err != nil { diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index b75a3b3341c8e3..ba8949259a6070 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -214,7 +214,7 @@ type Process struct { Variables eval.Variables `field:"-"` IsThread bool `field:"is_thread"` // SECLDoc[is_thread] Definition:`Indicates whether the process is considered a thread (that is, a child process that hasn't executed another program)` - IsExecChild bool `field:"-"` // Indicates whether the process is an exec child of its parent + IsExecExec bool `field:"-"` // Indicates whether the process is an exec following another exec IsParentMissing bool `field:"-"` // Indicates the direct parent is missing Source uint64 `field:"-"` diff --git a/pkg/security/secl/model/process_cache_entry_unix.go b/pkg/security/secl/model/process_cache_entry_unix.go index f66e9be8892469..0f0f700c072b9d 100644 --- a/pkg/security/secl/model/process_cache_entry_unix.go +++ b/pkg/security/secl/model/process_cache_entry_unix.go @@ -93,7 +93,7 @@ func (pc *ProcessCacheEntry) Exec(entry *ProcessCacheEntry) { // use exec time as exit time pc.Exit(entry.ExecTime) - entry.Process.IsExecChild = !pc.IsThread + entry.Process.IsExecExec = !pc.IsThread // keep some context copyProcessContext(pc, entry) diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index be85d4e93dd211..150509cb2ac5b8 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -67,7 +67,7 @@ func TestMacroMerge(t *testing.T) { t.Fatal(err) } - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.SetFieldValue("open.file.path", "/tmp/test") event.SetFieldValue("process.comm", "/usr/bin/vi") @@ -258,7 +258,7 @@ func TestActionSetVariable(t *testing.T) { t.Fatal("failed to find test_rule in ruleset") } - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.Type = uint32(model.FileOpenEventType) 
processCacheEntry := &model.ProcessCacheEntry{} processCacheEntry.Retain() diff --git a/pkg/security/secl/rules/ruleset.go b/pkg/security/secl/rules/ruleset.go index df856f157de008..bc887b4ab8524d 100644 --- a/pkg/security/secl/rules/ruleset.go +++ b/pkg/security/secl/rules/ruleset.go @@ -536,7 +536,7 @@ func (rs *RuleSet) GetEventApprovers(eventType eval.EventType, fieldCaps FieldCa return nil, ErrNoEventTypeBucket{EventType: eventType} } - return GetApprovers(bucket.rules, model.NewDefaultEvent(), fieldCaps) + return GetApprovers(bucket.rules, model.NewFakeEvent(), fieldCaps) } // GetFieldValues returns all the values of the given field @@ -648,12 +648,12 @@ func (rs *RuleSet) Evaluate(event eval.Event) bool { rs.logger.Tracef("Rule `%s` matches with event `%s`\n", rule.ID, event) } - rs.NotifyRuleMatch(rule, event) - result = true - if err := rs.runRuleActions(event, ctx, rule); err != nil { rs.logger.Errorf("Error while executing rule actions: %s", err) } + + rs.NotifyRuleMatch(rule, event) + result = true } } diff --git a/pkg/security/secl/rules/ruleset_test.go b/pkg/security/secl/rules/ruleset_test.go index 1640f7f942aada..23e6b3563a22a0 100644 --- a/pkg/security/secl/rules/ruleset_test.go +++ b/pkg/security/secl/rules/ruleset_test.go @@ -94,13 +94,13 @@ func addRuleExpr(t *testing.T, rs *RuleSet, exprs ...string) { } } -func newDefaultEvent() eval.Event { - return model.NewDefaultEvent() +func newFakeEvent() eval.Event { + return model.NewFakeEvent() } func newRuleSet() *RuleSet { ruleOpts, evalOpts := NewEvalOpts(map[eval.EventType]bool{"*": true}) - return NewRuleSet(&model.Model{}, newDefaultEvent, ruleOpts, evalOpts) + return NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) } func TestRuleBuckets(t *testing.T) { @@ -144,13 +144,13 @@ func TestRuleSetDiscarders(t *testing.T) { addRuleExpr(t, rs, exprs...) - ev1 := model.NewDefaultEvent() + ev1 := model.NewFakeEvent() ev1.Type = uint32(model.FileOpenEventType) ev1.SetFieldValue("open.file.path", "/usr/local/bin/rootkit") ev1.SetFieldValue("open.flags", syscall.O_RDONLY) ev1.SetFieldValue("process.uid", 0) - ev2 := model.NewDefaultEvent() + ev2 := model.NewFakeEvent() ev2.Type = uint32(model.FileMkdirEventType) ev2.SetFieldValue("mkdir.file.path", "/usr/local/bin/rootkit") ev2.SetFieldValue("mkdir.mode", 0777) @@ -584,7 +584,7 @@ func TestGetRuleEventType(t *testing.T) { t.Fatalf("should get an event type: %s", err) } - event := model.NewDefaultEvent() + event := model.NewFakeEvent() fieldEventType, err := event.GetFieldEventType("open.file.name") if err != nil { t.Fatal("should get a field event type") diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index aeade509435dc3..806044b82fefeb 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -559,8 +559,8 @@ func (at *ActivityTree) findBranch(parent ProcessNodeParent, branch []*model.Pro return nil, len(branch) - i, true } - // make sure we properly update the isExecChild status - matchingNode.Process.IsExecChild = matchingNode.Process.IsExecChild || branchCursor.IsExecChild + // make sure we properly update the IsExecExec status + matchingNode.Process.IsExecExec = matchingNode.Process.IsExecExec || branchCursor.IsExecExec // here is the current state of the tree: // parent -> treeNodeToRebase -> [...] 
-> matchingNode @@ -569,10 +569,10 @@ func (at *ActivityTree) findBranch(parent ProcessNodeParent, branch []*model.Pro at.rebaseTree(parent, treeNodeToRebaseIndex, parent, branch[i:], generationType, resolvers) return matchingNode, len(branch) - i, true - } + // are we looking for an exec child ? - if siblings := parent.GetSiblings(); branchCursor.IsExecChild && siblings != nil { + if siblings := parent.GetSiblings(); branchCursor.IsExecExec && siblings != nil { // if yes, then look for branchCursor in the siblings of the parent of children matchingNode, treeNodeToRebaseIndex = at.findProcessCacheEntryInTree(*siblings, branchCursor) @@ -585,8 +585,8 @@ func (at *ActivityTree) findBranch(parent ProcessNodeParent, branch []*model.Pro return nil, len(branch) - i, true } - // make sure we properly update the isExecChild status - matchingNode.Process.IsExecChild = matchingNode.Process.IsExecChild || branchCursor.IsExecChild + // make sure we properly update the IsExecExec status + matchingNode.Process.IsExecExec = matchingNode.Process.IsExecExec || branchCursor.IsExecExec // here is the current state of the tree: // parent of parent -> treeNodeToRebase -> [...] -> matchingNode @@ -601,7 +601,7 @@ func (at *ActivityTree) findBranch(parent ProcessNodeParent, branch []*model.Pro // We didn't find the current entry anywhere, has it execed into something else ? (i.e. are we missing something // in the profile ?) if i-1 >= 0 { - if branch[i-1].IsExecChild { + if branch[i-1].IsExecExec { continue } } @@ -623,7 +623,7 @@ func (at *ActivityTree) rebaseTree(parent ProcessNodeParent, childIndexToRebase // matching "isExecChild = true" nodes, except parent.GetChildren()[childIndexToRebase] that might be a "isExecChild // = false" node. To be safe, check if the 2 top level nodes match if one of them is an "isExecChild = true" node. childToRebase := (*parent.GetChildren())[childIndexToRebase] - if topLevelNode := branchToInsert[len(branchToInsert)-1]; !topLevelNode.IsExecChild || !childToRebase.Process.IsExecChild { + if topLevelNode := branchToInsert[len(branchToInsert)-1]; !topLevelNode.IsExecExec || !childToRebase.Process.IsExecExec { if childToRebase.Matches(&topLevelNode.Process, at.differentiateArgs, true) { // ChildNodeToRebase and topLevelNode match and need to be merged, rebase the one in the profile, and insert // the remaining nodes of the branch on top of it @@ -656,7 +656,7 @@ func (at *ActivityTree) rebaseTree(parent ProcessNodeParent, childIndexToRebase } // mark the rebased node as an exec child - (*parent.GetChildren())[childIndexToRebase].Process.IsExecChild = true + (*parent.GetChildren())[childIndexToRebase].Process.IsExecExec = true if rebaseRoot == nil { rebaseRoot = (*parent.GetChildren())[childIndexToRebase] @@ -701,7 +701,7 @@ func (at *ActivityTree) findProcessCacheEntryInTree(tree []*ProcessNode, entry * func (at *ActivityTree) findProcessCacheEntryInChildExecedNodes(child *ProcessNode, entry *model.ProcessCacheEntry) *ProcessNode { // fast path for _, node := range child.Children { - if node.Process.IsExecChild { + if node.Process.IsExecExec { // does this execed child match the entry ? 
if node.Matches(&entry.Process, at.differentiateArgs, true) { return node @@ -725,7 +725,7 @@ func (at *ActivityTree) findProcessCacheEntryInChildExecedNodes(child *ProcessNo // look for an execed child for _, node := range cursor.Children { - if node.Process.IsExecChild && !slices.Contains(visited, node) { + if node.Process.IsExecExec && !slices.Contains(visited, node) { // there should always be only one // does this execed child match the entry ? diff --git a/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go b/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go index da749fe0437e1f..cbe3a517de82e6 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go @@ -85,7 +85,7 @@ func protoDecodeProcessNode(p *adproto.ProcessInfo) model.Process { PPid: p.Ppid, Cookie: p.Cookie64, IsThread: p.IsThread, - IsExecChild: p.IsExecChild, + IsExecExec: p.IsExecChild, FileEvent: *protoDecodeFileEvent(p.File), ContainerID: p.ContainerId, SpanID: p.SpanId, diff --git a/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go b/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go index e701d7c338c528..4f13f88743b292 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go @@ -83,7 +83,7 @@ func processNodeToProto(p *model.Process) *adproto.ProcessInfo { Ppid: p.PPid, Cookie64: p.Cookie, IsThread: p.IsThread, - IsExecChild: p.IsExecChild, + IsExecChild: p.IsExecExec, File: fileEventToProto(&p.FileEvent), ContainerId: p.ContainerID, SpanId: p.SpanID, diff --git a/pkg/security/security_profile/activity_tree/activity_tree_test.go b/pkg/security/security_profile/activity_tree/activity_tree_test.go index 9b06925bd8a969..bfb07b6ef8f506 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_test.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_test.go @@ -34,7 +34,7 @@ func TestInsertFileEvent(t *testing.T) { "/tmp/bar/test", } expectedDebugOuput := strings.TrimSpace(` -- process: /test/pan (argv0: pan) (is_exec_child:false) +- process: /test/pan (argv0: pan) (is_exec_exec:false) files: - hello - test @@ -52,7 +52,7 @@ func TestInsertFileEvent(t *testing.T) { for _, path := range pathToInserts { event := &model.Event{ BaseEvent: model.BaseEvent{ - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, }, Open: model.OpenEvent{ File: model.FileEvent{ @@ -117,9 +117,22 @@ func newExecTestEventWithAncestors(lineage []model.Process) *model.Event { } cursor := ancestor + maxPid := uint32(len(lineageDup)) + 1 + + nextPid := func(current uint32, IsExecExec bool) uint32 { + if IsExecExec { + return current + } + return current - 1 + } + + currentPid := maxPid - 1 for _, p := range lineageDup[1:] { cursor.Process = p + cursor.Process.Pid = currentPid + currentPid = nextPid(currentPid, cursor.Process.IsExecExec) cursor.Ancestor = new(model.ProcessCacheEntry) + cursor.Parent = &cursor.Ancestor.Process cursor = cursor.Ancestor } @@ -138,16 +151,19 @@ func newExecTestEventWithAncestors(lineage []model.Process) *model.Event { }, } + lineageDup[0].Pid = nextPid(maxPid, !lineageDup[0].IsExecExec) + evt := &model.Event{ BaseEvent: model.BaseEvent{ Type: uint32(model.ExecEventType), - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: 
&model.FakeFieldHandlers{}, ContainerContext: &model.ContainerContext{}, ProcessContext: &model.ProcessContext{}, ProcessCacheEntry: &model.ProcessCacheEntry{ ProcessContext: model.ProcessContext{ Process: lineageDup[0], Ancestor: ancestor, + Parent: &ancestor.Process, }, }, }, diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index a2c0e190fb6786..8017412d21428a 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -105,7 +105,7 @@ func (pn *ProcessNode) getNodeLabel(args string) string { // nolint: unused func (pn *ProcessNode) debug(w io.Writer, prefix string) { - fmt.Fprintf(w, "%s- process: %s (argv0: %s) (is_exec_child:%v)\n", prefix, pn.Process.FileEvent.PathnameStr, pn.Process.Argv0, pn.Process.IsExecChild) + fmt.Fprintf(w, "%s- process: %s (argv0: %s) (is_exec_exec:%v)\n", prefix, pn.Process.FileEvent.PathnameStr, pn.Process.Argv0, pn.Process.IsExecExec) if len(pn.Files) > 0 { fmt.Fprintf(w, "%s files:\n", prefix) sortedFiles := make([]*FileNode, 0, len(pn.Files)) diff --git a/pkg/security/security_profile/profile/manager_test.go b/pkg/security/security_profile/profile/manager_test.go index 724aeb727bfcd0..3c6a69cea9641e 100644 --- a/pkg/security/security_profile/profile/manager_test.go +++ b/pkg/security/security_profile/profile/manager_test.go @@ -39,7 +39,7 @@ type testIteration struct { } func craftFakeEvent(t0 time.Time, ti *testIteration, defaultContainerID string) *model.Event { - event := model.NewDefaultEvent() + event := model.NewFakeEvent() event.Type = uint32(ti.eventType) event.ContainerContext.CreatedAt = uint64(t0.Add(ti.containerCreatedAt).UnixNano()) event.TimestampRaw = uint64(t0.Add(ti.eventTimestampRaw).UnixNano()) diff --git a/pkg/security/serializers/deserializers.go b/pkg/security/serializers/deserializers.go index c6aae67ffd17ce..d9d54333aa2151 100644 --- a/pkg/security/serializers/deserializers.go +++ b/pkg/security/serializers/deserializers.go @@ -63,7 +63,7 @@ func newProcess(ps *ProcessSerializer) model.Process { Envs: ps.Envs, EnvsTruncated: ps.EnvsTruncated, IsThread: ps.IsThread, - IsExecChild: ps.IsExecChild, + IsExecExec: ps.IsExecExec, PIDContext: model.PIDContext{ Pid: ps.Pid, Tid: ps.Tid, @@ -103,7 +103,7 @@ func UnmarshalEvent(raw []byte) (*model.Event, error) { event := model.Event{ BaseEvent: model.BaseEvent{ Type: uint32(model.ExecEventType), - FieldHandlers: &model.DefaultFieldHandlers{}, + FieldHandlers: &model.FakeFieldHandlers{}, ContainerContext: &model.ContainerContext{}, ProcessContext: &model.ProcessContext{ Process: process, diff --git a/pkg/security/serializers/serializers_base.go b/pkg/security/serializers/serializers_base.go index c276ea1555d20c..b749f41e6be467 100644 --- a/pkg/security/serializers/serializers_base.go +++ b/pkg/security/serializers/serializers_base.go @@ -8,8 +8,12 @@ package serializers import ( + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) // ContainerContextSerializer serializes a container context to JSON @@ -19,8 +23,14 @@ type ContainerContextSerializer struct { ID string `json:"id,omitempty"` // Creation time of the container CreatedAt *utils.EasyjsonTime `json:"created_at,omitempty"` + // Variables values + Variables Variables 
`json:"variables,omitempty"` } +// Variables serializes the variable values +// easyjson:json +type Variables map[string]interface{} + // MatchedRuleSerializer serializes a rule // easyjson:json type MatchedRuleSerializer struct { @@ -51,6 +61,8 @@ type EventContextSerializer struct { MatchedRules []MatchedRuleSerializer `json:"matched_rules,omitempty"` // Origin of the event Origin string `json:"origin,omitempty"` + // Variables values + Variables Variables `json:"variables,omitempty"` } // ProcessContextSerializer serializes a process context to JSON @@ -61,6 +73,8 @@ type ProcessContextSerializer struct { Parent *ProcessSerializer `json:"parent,omitempty"` // Ancestor processes Ancestors []*ProcessSerializer `json:"ancestors,omitempty"` + // Variables values + Variables Variables `json:"variables,omitempty"` } // IPPortSerializer is used to serialize an IP and Port context to JSON @@ -210,17 +224,18 @@ func newExitEventSerializer(e *model.Event) *ExitEventSerializer { } // NewBaseEventSerializer creates a new event serializer based on the event type -func NewBaseEventSerializer(event *model.Event) *BaseEventSerializer { +func NewBaseEventSerializer(event *model.Event, opts *eval.Opts) *BaseEventSerializer { pc := event.ProcessContext eventType := model.EventType(event.Type) s := &BaseEventSerializer{ EventContextSerializer: EventContextSerializer{ - Name: eventType.String(), - Origin: event.Origin, + Name: eventType.String(), + Origin: event.Origin, + Variables: newVariablesContext(event, opts, ""), }, - ProcessContextSerializer: newProcessContextSerializer(pc, event), + ProcessContextSerializer: newProcessContextSerializer(pc, event, opts), Date: utils.NewEasyjsonTime(event.ResolveEventTime()), } @@ -249,3 +264,40 @@ func NewBaseEventSerializer(event *model.Event) *BaseEventSerializer { return s } + +func newVariablesContext(e *model.Event, opts *eval.Opts, prefix string) (variables Variables) { + if opts != nil && opts.VariableStore != nil { + store := opts.VariableStore + for name, variable := range store.Variables { + if (prefix != "" && !strings.HasPrefix(name, prefix)) || + (prefix == "" && strings.Contains(name, ".")) { + continue + } + + evaluator := variable.GetEvaluator() + if evaluator, ok := evaluator.(eval.Evaluator); ok { + value := evaluator.Eval(eval.NewContext(e)) + if variables == nil { + variables = Variables{} + } + if value != nil { + switch value := value.(type) { + case []string: + for _, value := range value { + if scrubbed, err := scrubber.ScrubString(value); err == nil { + variables[strings.TrimPrefix(name, prefix)] = scrubbed + } + } + case string: + if scrubbed, err := scrubber.ScrubString(value); err == nil { + variables[strings.TrimPrefix(name, prefix)] = scrubbed + } + default: + variables[strings.TrimPrefix(name, prefix)] = value + } + } + } + } + } + return variables +} diff --git a/pkg/security/serializers/serializers_base_linux_easyjson.go b/pkg/security/serializers/serializers_base_linux_easyjson.go index ca7168507a3776..8e98c091796d83 100644 --- a/pkg/security/serializers/serializers_base_linux_easyjson.go +++ b/pkg/security/serializers/serializers_base_linux_easyjson.go @@ -21,7 +21,69 @@ var ( _ easyjson.Marshaler ) -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in *jlexer.Lexer, out *ProcessContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in *jlexer.Lexer, out *Variables) { + isTopLevel := in.IsStart() + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + *out 
= make(Variables) + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v1 interface{} + if m, ok := v1.(easyjson.Unmarshaler); ok { + m.UnmarshalEasyJSON(in) + } else if m, ok := v1.(json.Unmarshaler); ok { + _ = m.UnmarshalJSON(in.Raw()) + } else { + v1 = in.Interface() + } + (*out)[key] = v1 + in.WantComma() + } + in.Delim('}') + } + if isTopLevel { + in.Consumed() + } +} +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(out *jwriter.Writer, in Variables) { + if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { + out.RawString(`null`) + } else { + out.RawByte('{') + v2First := true + for v2Name, v2Value := range in { + if v2First { + v2First = false + } else { + out.RawByte(',') + } + out.String(string(v2Name)) + out.RawByte(':') + if m, ok := v2Value.(easyjson.Marshaler); ok { + m.MarshalEasyJSON(out) + } else if m, ok := v2Value.(json.Marshaler); ok { + out.Raw(m.MarshalJSON()) + } else { + out.Raw(json.Marshal(v2Value)) + } + } + out.RawByte('}') + } +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v Variables) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *Variables) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(l, v) +} +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(in *jlexer.Lexer, out *ProcessContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -67,21 +129,23 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in out.Ancestors = (out.Ancestors)[:0] } for !in.IsDelim(']') { - var v1 *ProcessSerializer + var v3 *ProcessSerializer if in.IsNull() { in.Skip() - v1 = nil + v3 = nil } else { - if v1 == nil { - v1 = new(ProcessSerializer) + if v3 == nil { + v3 = new(ProcessSerializer) } - (*v1).UnmarshalEasyJSON(in) + (*v3).UnmarshalEasyJSON(in) } - out.Ancestors = append(out.Ancestors, v1) + out.Ancestors = append(out.Ancestors, v3) in.WantComma() } in.Delim(']') } + case "variables": + (out.Variables).UnmarshalEasyJSON(in) case "pid": out.Pid = uint32(in.Uint32()) case "ppid": @@ -214,9 +278,9 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in out.Args = (out.Args)[:0] } for !in.IsDelim(']') { - var v2 string - v2 = string(in.String()) - out.Args = append(out.Args, v2) + var v4 string + v4 = string(in.String()) + out.Args = append(out.Args, v4) in.WantComma() } in.Delim(']') @@ -239,9 +303,9 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in out.Envs = (out.Envs)[:0] } for !in.IsDelim(']') { - var v3 string - v3 = string(in.String()) - out.Envs = append(out.Envs, v3) + var v5 string + v5 = string(in.String()) + out.Envs = append(out.Envs, v5) in.WantComma() } in.Delim(']') @@ -253,7 +317,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in case "is_kworker": out.IsKworker = bool(in.Bool()) case "is_exec_child": - out.IsExecChild = bool(in.Bool()) + out.IsExecExec = bool(in.Bool()) case "source": out.Source = string(in.String()) default: @@ -266,7 +330,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(in in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(out *jwriter.Writer, in ProcessContextSerializer) { +func 
easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(out *jwriter.Writer, in ProcessContextSerializer) { out.RawByte('{') first := true _ = first @@ -286,19 +350,29 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(ou } { out.RawByte('[') - for v4, v5 := range in.Ancestors { - if v4 > 0 { + for v6, v7 := range in.Ancestors { + if v6 > 0 { out.RawByte(',') } - if v5 == nil { + if v7 == nil { out.RawString("null") } else { - (*v5).MarshalEasyJSON(out) + (*v7).MarshalEasyJSON(out) } } out.RawByte(']') } } + if len(in.Variables) != 0 { + const prefix string = ",\"variables\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (in.Variables).MarshalEasyJSON(out) + } if in.Pid != 0 { const prefix string = ",\"pid\":" if first { @@ -419,11 +493,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(ou out.RawString(prefix) { out.RawByte('[') - for v6, v7 := range in.Args { - if v6 > 0 { + for v8, v9 := range in.Args { + if v8 > 0 { out.RawByte(',') } - out.String(string(v7)) + out.String(string(v9)) } out.RawByte(']') } @@ -438,11 +512,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(ou out.RawString(prefix) { out.RawByte('[') - for v8, v9 := range in.Envs { - if v8 > 0 { + for v10, v11 := range in.Envs { + if v10 > 0 { out.RawByte(',') } - out.String(string(v9)) + out.String(string(v11)) } out.RawByte(']') } @@ -462,10 +536,10 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(ou out.RawString(prefix) out.Bool(bool(in.IsKworker)) } - if in.IsExecChild { + if in.IsExecExec { const prefix string = ",\"is_exec_child\":" out.RawString(prefix) - out.Bool(bool(in.IsExecChild)) + out.Bool(bool(in.IsExecExec)) } if in.Source != "" { const prefix string = ",\"source\":" @@ -477,14 +551,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(ou // MarshalEasyJSON supports easyjson.Marshaler interface func (v ProcessContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *ProcessContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(in *jlexer.Lexer, out *NetworkContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(in *jlexer.Lexer, out *NetworkContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -533,7 +607,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(out *jwriter.Writer, in NetworkContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(out *jwriter.Writer, in NetworkContextSerializer) { out.RawByte('{') first := true _ = first @@ -578,14 +652,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v NetworkContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *NetworkContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers1(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(in *jlexer.Lexer, out *MatchedRuleSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(in *jlexer.Lexer, out *MatchedRuleSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -624,9 +698,9 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(i out.Tags = (out.Tags)[:0] } for !in.IsDelim(']') { - var v10 string - v10 = string(in.String()) - out.Tags = append(out.Tags, v10) + var v12 string + v12 = string(in.String()) + out.Tags = append(out.Tags, v12) in.WantComma() } in.Delim(']') @@ -645,7 +719,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(out *jwriter.Writer, in MatchedRuleSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(out *jwriter.Writer, in MatchedRuleSerializer) { out.RawByte('{') first := true _ = first @@ -675,11 +749,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(o } { out.RawByte('[') - for v11, v12 := range in.Tags { - if v11 > 0 { + for v13, v14 := range in.Tags { + if v13 > 0 { out.RawByte(',') } - out.String(string(v12)) + out.String(string(v14)) } out.RawByte(']') } @@ -709,14 +783,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v MatchedRuleSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *MatchedRuleSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(in *jlexer.Lexer, out *IPPortSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(in *jlexer.Lexer, out *IPPortSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -749,7 +823,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(out *jwriter.Writer, in IPPortSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(out *jwriter.Writer, in IPPortSerializer) { out.RawByte('{') first := true _ = first @@ -768,14 +842,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v IPPortSerializer) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *IPPortSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers3(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(in *jlexer.Lexer, out *IPPortFamilySerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(in *jlexer.Lexer, out *IPPortFamilySerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -810,7 +884,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(out *jwriter.Writer, in IPPortFamilySerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(out *jwriter.Writer, in IPPortFamilySerializer) { out.RawByte('{') first := true _ = first @@ -834,14 +908,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v IPPortFamilySerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *IPPortFamilySerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(in *jlexer.Lexer, out *ExitEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(in *jlexer.Lexer, out *ExitEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -874,7 +948,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(out *jwriter.Writer, in ExitEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(out *jwriter.Writer, in ExitEventSerializer) { out.RawByte('{') first := true _ = first @@ -893,14 +967,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v ExitEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *ExitEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(in *jlexer.Lexer, out *EventContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(in *jlexer.Lexer, out *EventContextSerializer) { 
isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -943,15 +1017,17 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(i out.MatchedRules = (out.MatchedRules)[:0] } for !in.IsDelim(']') { - var v13 MatchedRuleSerializer - (v13).UnmarshalEasyJSON(in) - out.MatchedRules = append(out.MatchedRules, v13) + var v15 MatchedRuleSerializer + (v15).UnmarshalEasyJSON(in) + out.MatchedRules = append(out.MatchedRules, v15) in.WantComma() } in.Delim(']') } case "origin": out.Origin = string(in.String()) + case "variables": + (out.Variables).UnmarshalEasyJSON(in) default: in.SkipRecursive() } @@ -962,7 +1038,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(out *jwriter.Writer, in EventContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(out *jwriter.Writer, in EventContextSerializer) { out.RawByte('{') first := true _ = first @@ -1012,11 +1088,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(o } { out.RawByte('[') - for v14, v15 := range in.MatchedRules { - if v14 > 0 { + for v16, v17 := range in.MatchedRules { + if v16 > 0 { out.RawByte(',') } - (v15).MarshalEasyJSON(out) + (v17).MarshalEasyJSON(out) } out.RawByte(']') } @@ -1031,19 +1107,29 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(o } out.String(string(in.Origin)) } + if len(in.Variables) != 0 { + const prefix string = ",\"variables\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (in.Variables).MarshalEasyJSON(out) + } out.RawByte('}') } // MarshalEasyJSON supports easyjson.Marshaler interface func (v EventContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *EventContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(in *jlexer.Lexer, out *DNSQuestionSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(in *jlexer.Lexer, out *DNSQuestionSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1082,7 +1168,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(out *jwriter.Writer, in DNSQuestionSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(out *jwriter.Writer, in DNSQuestionSerializer) { out.RawByte('{') first := true _ = first @@ -1116,14 +1202,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v DNSQuestionSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v 
*DNSQuestionSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(in *jlexer.Lexer, out *DNSEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(in *jlexer.Lexer, out *DNSEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1156,7 +1242,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(out *jwriter.Writer, in DNSEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(out *jwriter.Writer, in DNSEventSerializer) { out.RawByte('{') first := true _ = first @@ -1175,14 +1261,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v DNSEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *DNSEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(in *jlexer.Lexer, out *DDContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(in *jlexer.Lexer, out *DDContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1215,7 +1301,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(out *jwriter.Writer, in DDContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(out *jwriter.Writer, in DDContextSerializer) { out.RawByte('{') first := true _ = first @@ -1240,14 +1326,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v DDContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *DDContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(in *jlexer.Lexer, out *ContainerContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(in *jlexer.Lexer, out *ContainerContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1280,6 +1366,8 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( in.AddError((*out.CreatedAt).UnmarshalJSON(data)) } } + case 
"variables": + (out.Variables).UnmarshalEasyJSON(in) default: in.SkipRecursive() } @@ -1290,7 +1378,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(out *jwriter.Writer, in ContainerContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(out *jwriter.Writer, in ContainerContextSerializer) { out.RawByte('{') first := true _ = first @@ -1310,19 +1398,29 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( } (*in.CreatedAt).MarshalEasyJSON(out) } + if len(in.Variables) != 0 { + const prefix string = ",\"variables\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (in.Variables).MarshalEasyJSON(out) + } out.RawByte('}') } // MarshalEasyJSON supports easyjson.Marshaler interface func (v ContainerContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *ContainerContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(in *jlexer.Lexer, out *BaseEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(in *jlexer.Lexer, out *BaseEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1401,7 +1499,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(out *jwriter.Writer, in BaseEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(out *jwriter.Writer, in BaseEventSerializer) { out.RawByte('{') first := true _ = first @@ -1466,10 +1564,10 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( // MarshalEasyJSON supports easyjson.Marshaler interface func (v BaseEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *BaseEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(l, v) } diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index 3a6a795aac4dc3..6fed6652656bc9 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ -236,10 +236,12 @@ type ProcessSerializer struct { IsThread bool `json:"is_thread,omitempty"` // Indicates whether the process is a kworker IsKworker bool `json:"is_kworker,omitempty"` - // Indicates wether the process is an exec child of its parent - IsExecChild bool `json:"is_exec_child,omitempty"` + // Indicates whether the process is an exec following another exec + 
IsExecExec bool `json:"is_exec_child,omitempty"` // Process source Source string `json:"source,omitempty"` + // Variables values + Variables Variables `json:"variables,omitempty"` } // FileEventSerializer serializes a file event to JSON @@ -556,7 +558,7 @@ func newCredentialsSerializer(ce *model.Credentials) *CredentialsSerializer { } } -func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer { +func newProcessSerializer(ps *model.Process, e *model.Event, opts *eval.Opts) *ProcessSerializer { if ps.IsNotKworker() { argv := e.FieldHandlers.ResolveProcessArgvScrubbed(e, ps) argvTruncated := e.FieldHandlers.ResolveProcessArgsTruncated(e, ps) @@ -582,8 +584,9 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer EnvsTruncated: envsTruncated, IsThread: ps.IsThread, IsKworker: ps.IsKworker, - IsExecChild: ps.IsExecChild, + IsExecExec: ps.IsExecExec, Source: model.ProcessSourceToString(ps.Source), + Variables: newVariablesContext(e, opts, "process."), } if ps.HasInterpreter() { @@ -610,14 +613,15 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer CreatedAt: getTimeIfNotZero(time.Unix(0, int64(e.GetContainerCreatedAt()))), } } + return psSerializer } return &ProcessSerializer{ - Pid: ps.Pid, - Tid: ps.Tid, - IsKworker: ps.IsKworker, - IsExecChild: ps.IsExecChild, - Source: model.ProcessSourceToString(ps.Source), + Pid: ps.Pid, + Tid: ps.Tid, + IsKworker: ps.IsKworker, + IsExecExec: ps.IsExecExec, + Source: model.ProcessSourceToString(ps.Source), Credentials: &ProcessCredentialsSerializer{ CredentialsSerializer: &CredentialsSerializer{}, }, @@ -725,7 +729,7 @@ func newPTraceEventSerializer(e *model.Event) *PTraceEventSerializer { return &PTraceEventSerializer{ Request: model.PTraceRequest(e.PTrace.Request).String(), Address: fmt.Sprintf("0x%x", e.PTrace.Address), - Tracee: newProcessContextSerializer(e.PTrace.Tracee, e), + Tracee: newProcessContextSerializer(e.PTrace.Tracee, e, nil), } } @@ -750,7 +754,7 @@ func newSignalEventSerializer(e *model.Event) *SignalEventSerializer { ses := &SignalEventSerializer{ Type: model.Signal(e.Signal.Type).String(), PID: e.Signal.PID, - Target: newProcessContextSerializer(e.Signal.Target, e), + Target: newProcessContextSerializer(e.Signal.Target, e, nil), } return ses } @@ -830,13 +834,13 @@ func serializeOutcome(retval int64) string { } } -func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *ProcessContextSerializer { +func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event, opts *eval.Opts) *ProcessContextSerializer { if pc == nil || pc.Pid == 0 || e == nil { return nil } ps := ProcessContextSerializer{ - ProcessSerializer: newProcessSerializer(&pc.Process, e), + ProcessSerializer: newProcessSerializer(&pc.Process, e, opts), } ctx := eval.NewContext(e) @@ -852,7 +856,7 @@ func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *Proc for ptr != nil { pce := (*model.ProcessCacheEntry)(ptr) - s := newProcessSerializer(&pce.Process, e) + s := newProcessSerializer(&pce.Process, e, opts) ps.Ancestors = append(ps.Ancestors, s) if first { @@ -937,8 +941,8 @@ func (e *EventSerializer) MarshalJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event) ([]byte, error) { - s := NewEventSerializer(event) +func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { + s := NewEventSerializer(event, opts) return utils.MarshalEasyJSON(s) } @@ -948,9 +952,9 @@ func 
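// Note on the rename above: only the Go field changes from IsExecChild to IsExecExec;
// the JSON tag stays "is_exec_child", so the serialized payload consumed downstream is
// unchanged. Illustrative excerpt of the resulting struct shape:
//
//	type ProcessSerializer struct {
//	    // Indicates whether the process is an exec following another exec
//	    IsExecExec bool `json:"is_exec_child,omitempty"`
//	}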
MarshalCustomEvent(event *events.CustomEvent) ([]byte, error) { } // NewEventSerializer creates a new event serializer based on the event type -func NewEventSerializer(event *model.Event) *EventSerializer { +func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { s := &EventSerializer{ - BaseEventSerializer: NewBaseEventSerializer(event), + BaseEventSerializer: NewBaseEventSerializer(event, opts), UserContextSerializer: newUserContextSerializer(event), DDContextSerializer: newDDContextSerializer(event), } @@ -968,6 +972,7 @@ func NewEventSerializer(event *model.Event) *EventSerializer { s.ContainerContextSerializer = &ContainerContextSerializer{ ID: ctx.ID, CreatedAt: getTimeIfNotZero(time.Unix(0, int64(ctx.CreatedAt))), + Variables: newVariablesContext(event, opts, "container."), } } diff --git a/pkg/security/serializers/serializers_linux_easyjson.go b/pkg/security/serializers/serializers_linux_easyjson.go index 1ddd29834d6477..2a52cb33ef2363 100644 --- a/pkg/security/serializers/serializers_linux_easyjson.go +++ b/pkg/security/serializers/serializers_linux_easyjson.go @@ -1062,9 +1062,35 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( case "is_kworker": out.IsKworker = bool(in.Bool()) case "is_exec_child": - out.IsExecChild = bool(in.Bool()) + out.IsExecExec = bool(in.Bool()) case "source": out.Source = string(in.String()) + case "variables": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + if !in.IsDelim('}') { + out.Variables = make(Variables) + } else { + out.Variables = nil + } + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v11 interface{} + if m, ok := v11.(easyjson.Unmarshaler); ok { + m.UnmarshalEasyJSON(in) + } else if m, ok := v11.(json.Unmarshaler); ok { + _ = m.UnmarshalJSON(in.Raw()) + } else { + v11 = in.Interface() + } + (out.Variables)[key] = v11 + in.WantComma() + } + in.Delim('}') + } default: in.SkipRecursive() } @@ -1195,11 +1221,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( out.RawString(prefix) { out.RawByte('[') - for v11, v12 := range in.Args { - if v11 > 0 { + for v12, v13 := range in.Args { + if v12 > 0 { out.RawByte(',') } - out.String(string(v12)) + out.String(string(v13)) } out.RawByte(']') } @@ -1214,11 +1240,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( out.RawString(prefix) { out.RawByte('[') - for v13, v14 := range in.Envs { - if v13 > 0 { + for v14, v15 := range in.Envs { + if v14 > 0 { out.RawByte(',') } - out.String(string(v14)) + out.String(string(v15)) } out.RawByte(']') } @@ -1238,16 +1264,41 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( out.RawString(prefix) out.Bool(bool(in.IsKworker)) } - if in.IsExecChild { + if in.IsExecExec { const prefix string = ",\"is_exec_child\":" out.RawString(prefix) - out.Bool(bool(in.IsExecChild)) + out.Bool(bool(in.IsExecExec)) } if in.Source != "" { const prefix string = ",\"source\":" out.RawString(prefix) out.String(string(in.Source)) } + if len(in.Variables) != 0 { + const prefix string = ",\"variables\":" + out.RawString(prefix) + { + out.RawByte('{') + v16First := true + for v16Name, v16Value := range in.Variables { + if v16First { + v16First = false + } else { + out.RawByte(',') + } + out.String(string(v16Name)) + out.RawByte(':') + if m, ok := v16Value.(easyjson.Marshaler); ok { + m.MarshalEasyJSON(out) + } else if m, ok := v16Value.(json.Marshaler); ok { + out.Raw(m.MarshalJSON()) + } else { 
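// A minimal sketch of a call site under the new signatures; emitRuleEvent and the way
// opts is obtained from the rule engine are assumptions, not part of this change.
func emitRuleEvent(event *model.Event, opts *eval.Opts) ([]byte, error) {
	// Passing a nil opts stays valid (as the updated tests do) and simply omits the
	// variables sections from the serialized event.
	return MarshalEvent(event, opts)
}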
+ out.Raw(json.Marshal(v16Value)) + } + } + out.RawByte('}') + } + } out.RawByte('}') } @@ -1328,9 +1379,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( out.CapEffective = (out.CapEffective)[:0] } for !in.IsDelim(']') { - var v15 string - v15 = string(in.String()) - out.CapEffective = append(out.CapEffective, v15) + var v17 string + v17 = string(in.String()) + out.CapEffective = append(out.CapEffective, v17) in.WantComma() } in.Delim(']') @@ -1351,9 +1402,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( out.CapPermitted = (out.CapPermitted)[:0] } for !in.IsDelim(']') { - var v16 string - v16 = string(in.String()) - out.CapPermitted = append(out.CapPermitted, v16) + var v18 string + v18 = string(in.String()) + out.CapPermitted = append(out.CapPermitted, v18) in.WantComma() } in.Delim(']') @@ -1456,11 +1507,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( out.RawString("null") } else { out.RawByte('[') - for v17, v18 := range in.CapEffective { - if v17 > 0 { + for v19, v20 := range in.CapEffective { + if v19 > 0 { out.RawByte(',') } - out.String(string(v18)) + out.String(string(v20)) } out.RawByte(']') } @@ -1472,11 +1523,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( out.RawString("null") } else { out.RawByte('[') - for v19, v20 := range in.CapPermitted { - if v19 > 0 { + for v21, v22 := range in.CapPermitted { + if v21 > 0 { out.RawByte(',') } - out.String(string(v20)) + out.String(string(v22)) } out.RawByte(']') } @@ -1829,9 +1880,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15( out.Argv = (out.Argv)[:0] } for !in.IsDelim(']') { - var v21 string - v21 = string(in.String()) - out.Argv = append(out.Argv, v21) + var v23 string + v23 = string(in.String()) + out.Argv = append(out.Argv, v23) in.WantComma() } in.Delim(']') @@ -1875,11 +1926,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15( out.RawString(prefix) { out.RawByte('[') - for v22, v23 := range in.Argv { - if v22 > 0 { + for v24, v25 := range in.Argv { + if v24 > 0 { out.RawByte(',') } - out.String(string(v23)) + out.String(string(v25)) } out.RawByte(']') } @@ -2149,9 +2200,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers18( out.Flags = (out.Flags)[:0] } for !in.IsDelim(']') { - var v24 string - v24 = string(in.String()) - out.Flags = append(out.Flags, v24) + var v26 string + v26 = string(in.String()) + out.Flags = append(out.Flags, v26) in.WantComma() } in.Delim(']') @@ -2212,9 +2263,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers18( out.Hashes = (out.Hashes)[:0] } for !in.IsDelim(']') { - var v25 string - v25 = string(in.String()) - out.Hashes = append(out.Hashes, v25) + var v27 string + v27 = string(in.String()) + out.Hashes = append(out.Hashes, v27) in.WantComma() } in.Delim(']') @@ -2351,11 +2402,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers18( out.RawString(prefix) { out.RawByte('[') - for v26, v27 := range in.Flags { - if v26 > 0 { + for v28, v29 := range in.Flags { + if v28 > 0 { out.RawByte(',') } - out.String(string(v27)) + out.String(string(v29)) } out.RawByte(']') } @@ -2390,11 +2441,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers18( out.RawString(prefix) { out.RawByte('[') - for v28, v29 := range in.Hashes { - if v28 > 0 { + for v30, v31 := range in.Hashes { + 
if v30 > 0 { out.RawByte(',') } - out.String(string(v29)) + out.String(string(v31)) } out.RawByte(']') } @@ -2527,9 +2578,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers19( out.Flags = (out.Flags)[:0] } for !in.IsDelim(']') { - var v30 string - v30 = string(in.String()) - out.Flags = append(out.Flags, v30) + var v32 string + v32 = string(in.String()) + out.Flags = append(out.Flags, v32) in.WantComma() } in.Delim(']') @@ -2590,9 +2641,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers19( out.Hashes = (out.Hashes)[:0] } for !in.IsDelim(']') { - var v31 string - v31 = string(in.String()) - out.Hashes = append(out.Hashes, v31) + var v33 string + v33 = string(in.String()) + out.Hashes = append(out.Hashes, v33) in.WantComma() } in.Delim(']') @@ -2769,11 +2820,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers19( out.RawString(prefix) { out.RawByte('[') - for v32, v33 := range in.Flags { - if v32 > 0 { + for v34, v35 := range in.Flags { + if v34 > 0 { out.RawByte(',') } - out.String(string(v33)) + out.String(string(v35)) } out.RawByte(']') } @@ -2808,11 +2859,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers19( out.RawString(prefix) { out.RawByte('[') - for v34, v35 := range in.Hashes { - if v34 > 0 { + for v36, v37 := range in.Hashes { + if v36 > 0 { out.RawByte(',') } - out.String(string(v35)) + out.String(string(v37)) } out.RawByte(']') } @@ -3399,9 +3450,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers21( out.Tags = (out.Tags)[:0] } for !in.IsDelim(']') { - var v36 string - v36 = string(in.String()) - out.Tags = append(out.Tags, v36) + var v38 string + v38 = string(in.String()) + out.Tags = append(out.Tags, v38) in.WantComma() } in.Delim(']') @@ -3439,11 +3490,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers21( out.RawString("null") } else { out.RawByte('[') - for v37, v38 := range in.Tags { - if v37 > 0 { + for v39, v40 := range in.Tags { + if v39 > 0 { out.RawByte(',') } - out.String(string(v38)) + out.String(string(v40)) } out.RawByte(']') } @@ -3514,9 +3565,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers23( out.CapEffective = (out.CapEffective)[:0] } for !in.IsDelim(']') { - var v39 string - v39 = string(in.String()) - out.CapEffective = append(out.CapEffective, v39) + var v41 string + v41 = string(in.String()) + out.CapEffective = append(out.CapEffective, v41) in.WantComma() } in.Delim(']') @@ -3537,9 +3588,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers23( out.CapPermitted = (out.CapPermitted)[:0] } for !in.IsDelim(']') { - var v40 string - v40 = string(in.String()) - out.CapPermitted = append(out.CapPermitted, v40) + var v42 string + v42 = string(in.String()) + out.CapPermitted = append(out.CapPermitted, v42) in.WantComma() } in.Delim(']') @@ -3625,11 +3676,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers23( out.RawString("null") } else { out.RawByte('[') - for v41, v42 := range in.CapEffective { - if v41 > 0 { + for v43, v44 := range in.CapEffective { + if v43 > 0 { out.RawByte(',') } - out.String(string(v42)) + out.String(string(v44)) } out.RawByte(']') } @@ -3641,11 +3692,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers23( out.RawString("null") } else { out.RawByte('[') - for v43, v44 := range in.CapPermitted { - if v43 > 0 { + for v45, v46 := 
range in.CapPermitted { + if v45 > 0 { out.RawByte(',') } - out.String(string(v44)) + out.String(string(v46)) } out.RawByte(']') } @@ -3697,9 +3748,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.CapEffective = (out.CapEffective)[:0] } for !in.IsDelim(']') { - var v45 string - v45 = string(in.String()) - out.CapEffective = append(out.CapEffective, v45) + var v47 string + v47 = string(in.String()) + out.CapEffective = append(out.CapEffective, v47) in.WantComma() } in.Delim(']') @@ -3720,9 +3771,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.CapPermitted = (out.CapPermitted)[:0] } for !in.IsDelim(']') { - var v46 string - v46 = string(in.String()) - out.CapPermitted = append(out.CapPermitted, v46) + var v48 string + v48 = string(in.String()) + out.CapPermitted = append(out.CapPermitted, v48) in.WantComma() } in.Delim(']') @@ -3748,11 +3799,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.RawString("null") } else { out.RawByte('[') - for v47, v48 := range in.CapEffective { - if v47 > 0 { + for v49, v50 := range in.CapEffective { + if v49 > 0 { out.RawByte(',') } - out.String(string(v48)) + out.String(string(v50)) } out.RawByte(']') } @@ -3764,11 +3815,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.RawString("null") } else { out.RawByte('[') - for v49, v50 := range in.CapPermitted { - if v49 > 0 { + for v51, v52 := range in.CapPermitted { + if v51 > 0 { out.RawByte(',') } - out.String(string(v50)) + out.String(string(v52)) } out.RawByte(']') } @@ -3880,9 +3931,9 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers26( out.Helpers = (out.Helpers)[:0] } for !in.IsDelim(']') { - var v51 string - v51 = string(in.String()) - out.Helpers = append(out.Helpers, v51) + var v53 string + v53 = string(in.String()) + out.Helpers = append(out.Helpers, v53) in.WantComma() } in.Delim(']') @@ -3947,11 +3998,11 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers26( } { out.RawByte('[') - for v52, v53 := range in.Helpers { - if v52 > 0 { + for v54, v55 := range in.Helpers { + if v54 > 0 { out.RawByte(',') } - out.String(string(v53)) + out.String(string(v55)) } out.RawByte(']') } diff --git a/pkg/security/serializers/serializers_others.go b/pkg/security/serializers/serializers_others.go index 296bf4ca362afa..53212383adfc6b 100644 --- a/pkg/security/serializers/serializers_others.go +++ b/pkg/security/serializers/serializers_others.go @@ -12,6 +12,7 @@ import ( json "encoding/json" "github.com/DataDog/datadog-agent/pkg/security/events" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" ) @@ -24,8 +25,8 @@ func (e *EventSerializer) ToJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event) ([]byte, error) { - s := NewEventSerializer(event) +func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { + s := NewEventSerializer(event, opts) return json.Marshal(s) } @@ -35,6 +36,6 @@ func MarshalCustomEvent(event *events.CustomEvent) ([]byte, error) { } // NewEventSerializer creates a new event serializer based on the event type -func NewEventSerializer(_ *model.Event) *EventSerializer { +func NewEventSerializer(_ *model.Event, _ *eval.Opts) *EventSerializer { return nil } diff --git a/pkg/security/serializers/serializers_windows.go 
b/pkg/security/serializers/serializers_windows.go index dc2cfa5225f2f0..0a1954eadcf227 100644 --- a/pkg/security/serializers/serializers_windows.go +++ b/pkg/security/serializers/serializers_windows.go @@ -43,6 +43,8 @@ type ProcessSerializer struct { OwnerSidString string `json:"user_sid,omitempty"` // User name User string `json:"user,omitempty"` + // Variables values + Variables Variables `json:"variables,omitempty"` } // FileEventSerializer serializes a file event to JSON @@ -65,7 +67,7 @@ func newFileSerializer(fe *model.FileEvent, e *model.Event, _ ...uint64) *FileSe } } -func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer { +func newProcessSerializer(ps *model.Process, e *model.Event, opts *eval.Opts) *ProcessSerializer { psSerializer := &ProcessSerializer{ ExecTime: getTimeIfNotZero(ps.ExecTime), ExitTime: getTimeIfNotZero(ps.ExitTime), @@ -76,6 +78,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer CmdLine: e.FieldHandlers.ResolveProcessCmdLineScrubbed(e, ps), OwnerSidString: ps.OwnerSidString, User: e.FieldHandlers.ResolveUser(e, ps), + Variables: newVariablesContext(e, opts, "process."), } if len(ps.ContainerID) != 0 { @@ -86,13 +89,13 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer return psSerializer } -func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *ProcessContextSerializer { +func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event, opts *eval.Opts) *ProcessContextSerializer { if pc == nil || pc.Pid == 0 || e == nil { return nil } ps := ProcessContextSerializer{ - ProcessSerializer: newProcessSerializer(&pc.Process, e), + ProcessSerializer: newProcessSerializer(&pc.Process, e, opts), } ctx := eval.NewContext(e) @@ -105,7 +108,7 @@ func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *Proc for ptr != nil { pce := (*model.ProcessCacheEntry)(ptr) - s := newProcessSerializer(&pce.Process, e) + s := newProcessSerializer(&pce.Process, e, opts) ps.Ancestors = append(ps.Ancestors, s) if first { @@ -129,8 +132,8 @@ func (e *EventSerializer) ToJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event) ([]byte, error) { - s := NewEventSerializer(event) +func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { + s := NewEventSerializer(event, opts) return json.Marshal(s) } @@ -140,8 +143,8 @@ func MarshalCustomEvent(event *events.CustomEvent) ([]byte, error) { } // NewEventSerializer creates a new event serializer based on the event type -func NewEventSerializer(event *model.Event) *EventSerializer { +func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { return &EventSerializer{ - BaseEventSerializer: NewBaseEventSerializer(event), + BaseEventSerializer: NewBaseEventSerializer(event, opts), } } diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index 161976cfa8c44d..7c29a7ffe5941b 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -519,7 +519,7 @@ func (tm *testModule) WaitSignal(tb testing.TB, action func() error, cb onRuleHa //nolint:deadcode,unused func (tm *testModule) marshalEvent(ev *model.Event) (string, error) { - b, err := serializers.MarshalEvent(ev) + b, err := serializers.MarshalEvent(ev, nil) return string(b), err } @@ -740,7 +740,7 @@ func genTestConfigs(cfgDir string, opts testOpts) (*emconfig.Config, *secconfig. 
"RuntimeSecurityEnabled": runtimeSecurityEnabled, "SBOMEnabled": opts.enableSBOM, "EBPFLessEnabled": ebpfLessEnabled, - "FIMEnabled": opts.enableFIM, // should only be enabled/disabled on windows + "FIMEnabled": opts.enableFIM, // should only be enabled/disabled on windows }); err != nil { return nil, nil, err } diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index c16e5797bf33b7..13a96aaf1aa1b6 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -358,7 +358,7 @@ func assertReturnValue(tb testing.TB, retval, expected int64) bool { //nolint:deadcode,unused func validateProcessContextLineage(tb testing.TB, event *model.Event) { - eventJSON, err := serializers.MarshalEvent(event) + eventJSON, err := serializers.MarshalEvent(event, nil) if err != nil { tb.Errorf("failed to marshal event: %v", err) return @@ -475,7 +475,7 @@ func validateProcessContextSECL(tb testing.TB, event *model.Event) { valid := nameFieldValid && pathFieldValid if !valid { - eventJSON, err := serializers.MarshalEvent(event) + eventJSON, err := serializers.MarshalEvent(event, nil) if err != nil { tb.Errorf("failed to marshal event: %v", err) return diff --git a/pkg/security/tests/serializers_test.go b/pkg/security/tests/serializers_test.go index 9dfc059a026b83..d6dc5b6dbec108 100644 --- a/pkg/security/tests/serializers_test.go +++ b/pkg/security/tests/serializers_test.go @@ -61,7 +61,7 @@ func fetchRealisticEventSerializerInner(tb testing.TB) *serializers.EventSeriali assert.Equal(tb, "open", event.GetType(), "wrong event type") }) - return serializers.NewEventSerializer(workingEvent) + return serializers.NewEventSerializer(workingEvent, nil) } func BenchmarkSerializersEasyJson(b *testing.B) { diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index c0ada906deba91..f029699d64637a 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -12,7 +12,6 @@ replace ( github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../comp/forwarder/orchestrator/orchestratorinterface github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../aggregator/ckey github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../collector/check/defaults - github.com/DataDog/datadog-agent/pkg/comp/core/secrets => ../../comp/core/secrets github.com/DataDog/datadog-agent/pkg/config/env => ../config/env github.com/DataDog/datadog-agent/pkg/config/model => ../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../config/setup/ @@ -133,7 +132,7 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.6.0 // indirect diff --git a/pkg/serializer/go.sum b/pkg/serializer/go.sum index 3cb952ac1ce40b..a4a91d3b6ca749 100644 --- a/pkg/serializer/go.sum +++ b/pkg/serializer/go.sum @@ -367,8 +367,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod 
h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -635,7 +635,6 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/pkg/snmp/snmpparse/config_snmp.go b/pkg/snmp/snmpparse/config_snmp.go index 2cd7449451aebb..e918a91077651c 100644 --- a/pkg/snmp/snmpparse/config_snmp.go +++ b/pkg/snmp/snmpparse/config_snmp.go @@ -118,7 +118,7 @@ func GetConfigCheckSnmp() ([]SNMPConfig, error) { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken() + err := util.SetAuthToken(config.Datadog) if err != nil { return nil, err } diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index 8a2db206cb8dd4..0230d866ffd965 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -87,6 +87,7 @@ type HTTPReceiver struct { statsd statsd.ClientInterface timing timing.Reporter + info *watchdog.CurrentInfo } // NewHTTPReceiver returns a pointer to a new HTTPReceiver @@ -137,6 +138,7 @@ func NewHTTPReceiver( statsd: statsd, timing: timing, + info: watchdog.NewCurrentInfo(), } } @@ -655,9 +657,9 @@ var killProcess = func(format string, a ...interface{}) { // the configuration MaxMemory and MaxCPU. If these values are 0, all limits are disabled and the rate // limiter will accept everything. func (r *HTTPReceiver) watchdog(now time.Time) { - cpu, _ := watchdog.CPU(now) + cpu, _ := r.info.CPU(now) wi := watchdog.Info{ - Mem: watchdog.Mem(), + Mem: r.info.Mem(), CPU: cpu, } if r.conf.MaxMemory > 0 { diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 0eb92414cc455b..3bdb99a6b479fc 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/trace -go 1.20 +go 1.21 // NOTE: Prefer using simple `require` directives instead of using `replace` if possible. 
// See https://github.com/DataDog/datadog-agent/blob/main/docs/dev/gomodreplace.md @@ -27,7 +27,7 @@ require ( github.com/golang/protobuf v1.5.3 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.3.1 - github.com/shirou/gopsutil/v3 v3.23.12 + github.com/shirou/gopsutil/v3 v3.24.1 github.com/stretchr/testify v1.8.4 github.com/tinylib/msgp v1.1.8 github.com/vmihailenco/msgpack/v4 v4.3.12 diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 856ed114557325..fb1b321d027bbc 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -215,9 +215,11 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= @@ -287,10 +289,11 @@ github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -370,6 +373,7 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -382,6 +386,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -478,6 +483,7 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -529,7 +535,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -693,6 +698,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/trace/watchdog/info.go b/pkg/trace/watchdog/info.go index ff5884ed8ed86b..5522df7fc48ff3 100644 --- a/pkg/trace/watchdog/info.go +++ b/pkg/trace/watchdog/info.go @@ -55,17 +55,6 @@ type CurrentInfo struct { lastCPU CPUInfo } -// globalCurrentInfo is a global default object one can safely use -// if only one goroutine is polling for CPU() and Mem() -var globalCurrentInfo *CurrentInfo - -func ensureGlobalInfo() { - if globalCurrentInfo != nil { - 
return - } - globalCurrentInfo = NewCurrentInfo() -} - // NewCurrentInfo creates a new CurrentInfo referring to the current running program. func NewCurrentInfo() *CurrentInfo { return &CurrentInfo{ @@ -108,15 +97,3 @@ func (pi *CurrentInfo) Mem() MemInfo { runtime.ReadMemStats(&ms) return MemInfo{Alloc: ms.Alloc} } - -// CPU returns basic CPU info, or the previous valid CPU info and an error. -func CPU(now time.Time) (CPUInfo, error) { - ensureGlobalInfo() - return globalCurrentInfo.CPU(now) -} - -// Mem returns basic memory info. -func Mem() MemInfo { - ensureGlobalInfo() - return globalCurrentInfo.Mem() -} diff --git a/pkg/trace/watchdog/info_test.go b/pkg/trace/watchdog/info_test.go index bd1f5e70f95e75..cfd2fab6ab208c 100644 --- a/pkg/trace/watchdog/info_test.go +++ b/pkg/trace/watchdog/info_test.go @@ -21,11 +21,12 @@ const ( func TestCPULow(t *testing.T) { assert := assert.New(t) runtime.GC() + info := NewCurrentInfo() - _, _ = CPU(time.Now()) - globalCurrentInfo.cacheDelay = testDuration + _, _ = info.CPU(time.Now()) + info.cacheDelay = testDuration time.Sleep(testDuration) - c, _ := CPU(time.Now()) + c, _ := info.CPU(time.Now()) t.Logf("CPU (sleep): %v", c) // checking that CPU is low enough, this is theoretically flaky, @@ -42,6 +43,7 @@ func TestCPUHigh(t *testing.T) { {10, false}, {100, false}, } + info := NewCurrentInfo() for _, tc := range tests { t.Run(fmt.Sprintf("%d_goroutines", tc.n), func(t *testing.T) { if !tc.runShort && testing.Short() { @@ -51,8 +53,8 @@ func TestCPUHigh(t *testing.T) { runtime.GC() done := make(chan struct{}, 1) - CPU(time.Now()) - globalCurrentInfo.cacheDelay = testDuration + info.CPU(time.Now()) + info.cacheDelay = testDuration for i := 0; i < tc.n; i++ { go func() { j := 0 @@ -67,7 +69,7 @@ func TestCPUHigh(t *testing.T) { }() } time.Sleep(testDuration) - c, _ := CPU(time.Now()) + c, _ := info.CPU(time.Now()) for i := 0; i < tc.n; i++ { done <- struct{}{} } @@ -85,10 +87,11 @@ func TestMemLow(t *testing.T) { assert := assert.New(t) runtime.GC() - oldM := Mem() - globalCurrentInfo.cacheDelay = testDuration + info := NewCurrentInfo() + oldM := info.Mem() + info.cacheDelay = testDuration time.Sleep(testDuration) - m := Mem() + m := info.Mem() t.Logf("Mem (sleep): %v", m) // Checking that Mem is low enough, this is theorically flaky, @@ -103,8 +106,9 @@ func doTestMemHigh(t *testing.T, n int) { done := make(chan struct{}, 1) data := make(chan []byte, 1) - oldM := Mem() - globalCurrentInfo.cacheDelay = testDuration + info := NewCurrentInfo() + oldM := info.Mem() + info.cacheDelay = testDuration go func() { a := make([]byte, n) a[0] = 1 @@ -113,7 +117,7 @@ func doTestMemHigh(t *testing.T, n int) { <-done }() time.Sleep(testDuration) - m := Mem() + m := info.Mem() done <- struct{}{} t.Logf("Mem (%d bytes): %v %v", n, oldM, m) @@ -133,21 +137,21 @@ func TestMemHigh(t *testing.T) { } func BenchmarkCPU(b *testing.B) { - CPU(time.Now()) // make sure globalCurrentInfo exists - globalCurrentInfo.cacheDelay = 0 // disable cache + info := NewCurrentInfo() + info.cacheDelay = 0 // disable cache b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - _, _ = CPU(time.Now()) + _, _ = info.CPU(time.Now()) } } func BenchmarkMem(b *testing.B) { - Mem() // make sure globalCurrentInfo exists - globalCurrentInfo.cacheDelay = 0 // disable cache + info := NewCurrentInfo() + info.cacheDelay = 0 // disable cache b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - _ = Mem() + _ = info.Mem() } } diff --git a/pkg/util/cgroups/go.mod b/pkg/util/cgroups/go.mod index 
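// With the package-level watchdog.CPU()/Mem() helpers and their global state removed,
// callers now own a *watchdog.CurrentInfo, as HTTPReceiver does above. A minimal
// migration sketch (sampleWatchdog is illustrative; assumes the time and watchdog imports):
func sampleWatchdog() (watchdog.CPUInfo, watchdog.MemInfo, error) {
	info := watchdog.NewCurrentInfo()
	cpu, err := info.CPU(time.Now()) // was: watchdog.CPU(now)
	mem := info.Mem()                // was: watchdog.Mem()
	return cpu, mem, err
}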
dde5a7cee967dd..85f3a404cb3a58 100644 --- a/pkg/util/cgroups/go.mod +++ b/pkg/util/cgroups/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/util/cgroups -go 1.20 +go 1.21 replace ( github.com/DataDog/datadog-agent/pkg/util/log => ../log diff --git a/pkg/util/clusteragent/clcrunner.go b/pkg/util/clusteragent/clcrunner.go index 4f73c08acab9f1..f37f793a8aba40 100644 --- a/pkg/util/clusteragent/clcrunner.go +++ b/pkg/util/clusteragent/clcrunner.go @@ -58,7 +58,7 @@ func GetCLCRunnerClient() (CLCRunnerClientInterface, error) { func (c *CLCRunnerClient) init() { c.initErr = nil - authToken, err := security.GetClusterAgentAuthToken() + authToken, err := security.GetClusterAgentAuthToken(config.Datadog) if err != nil { c.initErr = err return diff --git a/pkg/util/clusteragent/clusteragent.go b/pkg/util/clusteragent/clusteragent.go index 0127a6c1e7a09f..b58b9c4ea7303a 100644 --- a/pkg/util/clusteragent/clusteragent.go +++ b/pkg/util/clusteragent/clusteragent.go @@ -115,7 +115,7 @@ func (c *DCAClient) init() error { return err } - authToken, err := security.GetClusterAgentAuthToken() + authToken, err := security.GetClusterAgentAuthToken(config.Datadog) if err != nil { return err } diff --git a/pkg/util/clusteragent/clusteragent_test.go b/pkg/util/clusteragent/clusteragent_test.go index 5f6825364306ee..72d6b4b65192a5 100644 --- a/pkg/util/clusteragent/clusteragent_test.go +++ b/pkg/util/clusteragent/clusteragent_test.go @@ -307,7 +307,7 @@ func (suite *clusterAgentSuite) TestGetClusterAgentEndpointEmpty() { func (suite *clusterAgentSuite) TestGetClusterAgentAuthTokenEmpty() { mockConfig.SetWithoutSource("cluster_agent.auth_token", "") - _, err := security.CreateOrGetClusterAgentAuthToken() + _, err := security.CreateOrGetClusterAgentAuthToken(mockConfig) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) } @@ -315,7 +315,7 @@ func (suite *clusterAgentSuite) TestGetClusterAgentAuthTokenEmptyFile() { mockConfig.SetWithoutSource("cluster_agent.auth_token", "") err := os.WriteFile(suite.authTokenPath, []byte(""), os.ModePerm) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) - _, err = security.GetClusterAgentAuthToken() + _, err = security.GetClusterAgentAuthToken(mockConfig) require.NotNil(suite.T(), err, fmt.Sprintf("%v", err)) } @@ -324,7 +324,7 @@ func (suite *clusterAgentSuite) TestGetClusterAgentAuthTokenFileInvalid() { err := os.WriteFile(suite.authTokenPath, []byte("tooshort"), os.ModePerm) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) - _, err = security.GetClusterAgentAuthToken() + _, err = security.GetClusterAgentAuthToken(mockConfig) require.NotNil(suite.T(), err, fmt.Sprintf("%v", err)) } @@ -334,7 +334,7 @@ func (suite *clusterAgentSuite) TestGetClusterAgentAuthToken() { err := os.WriteFile(suite.authTokenPath, []byte(tokenFileValue), os.ModePerm) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) - t, err := security.GetClusterAgentAuthToken() + t, err := security.GetClusterAgentAuthToken(mockConfig) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) assert.Equal(suite.T(), tokenFileValue, t) } @@ -346,7 +346,7 @@ func (suite *clusterAgentSuite) TestGetClusterAgentAuthTokenConfigPriority() { require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) // load config token value instead of filesystem - t, err := security.GetClusterAgentAuthToken() + t, err := security.GetClusterAgentAuthToken(mockConfig) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) assert.Equal(suite.T(), clusterAgentTokenValue, t) } @@ -357,7 +357,7 @@ func (suite *clusterAgentSuite) 
TestGetClusterAgentAuthTokenTooShort() { err := os.WriteFile(suite.authTokenPath, []byte(tokenValue), os.ModePerm) require.Nil(suite.T(), err, fmt.Sprintf("%v", err)) - _, err = security.GetClusterAgentAuthToken() + _, err = security.GetClusterAgentAuthToken(mockConfig) require.NotNil(suite.T(), err, fmt.Sprintf("%v", err)) } diff --git a/pkg/util/log/go.mod b/pkg/util/log/go.mod index 005ce27fd49e2f..29d19e77e33d6f 100644 --- a/pkg/util/log/go.mod +++ b/pkg/util/log/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/util/log -go 1.20 +go 1.21 replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber diff --git a/pkg/util/log/go.sum b/pkg/util/log/go.sum index a0262bb26d4052..9342b6a6fac25d 100644 --- a/pkg/util/log/go.sum +++ b/pkg/util/log/go.sum @@ -1,10 +1,12 @@ github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -19,6 +21,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= diff --git a/pkg/util/pointer/go.mod b/pkg/util/pointer/go.mod index ba8f023e4a57e6..03ea32d93c4233 100644 --- a/pkg/util/pointer/go.mod +++ b/pkg/util/pointer/go.mod @@ -1,3 +1,3 @@ module github.com/DataDog/datadog-agent/pkg/util/pointer -go 1.20 +go 1.21 diff --git a/pkg/util/scrubber/go.mod b/pkg/util/scrubber/go.mod index 257a6b7a4088bc..fe076f63ecfe32 100644 --- a/pkg/util/scrubber/go.mod +++ b/pkg/util/scrubber/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/pkg/util/scrubber -go 1.20 +go 1.21 require ( github.com/stretchr/testify v1.8.1 diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index dc33ae2fc86418..e30271d12102e7 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -234,18 +234,6 @@ def get_common_test_args(build_tags, failfast): } -def set_runtime_comp_env(env): - env["DD_ENABLE_RUNTIME_COMPILER"] = "true" - env["DD_ALLOW_PRECOMPILED_FALLBACK"] = "false" - env["DD_ENABLE_CO_RE"] = "false" - - -def set_co_re_env(env): - 
env["DD_ENABLE_CO_RE"] = "true" - env["DD_ALLOW_RUNTIME_COMPILED_FALLBACK"] = "false" - env["DD_ALLOW_PRECOMPILED_FALLBACK"] = "false" - - def get_payload_version(): """ Return the Agent payload version (`x.y.z`) found in the go.mod file. diff --git a/tasks/modules.py b/tasks/modules.py index bc966e711bc8ac..c53faea1833a7f 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -180,6 +180,7 @@ def dependency_path(self, agent_version): ), "comp/logs/agent/config": GoModule("comp/logs/agent/config", independent=True), "cmd/agent/common/path": GoModule("cmd/agent/common/path", independent=True), + "pkg/api": GoModule("pkg/api", independent=True), "pkg/config/model": GoModule("pkg/config/model", independent=True), "pkg/config/env": GoModule("pkg/config/env", independent=True), "pkg/config/setup": GoModule("pkg/config/setup", independent=True), diff --git a/tasks/pr_checks.py b/tasks/pr_checks.py index fefb5adef8b785..808bc2aca0ae51 100644 --- a/tasks/pr_checks.py +++ b/tasks/pr_checks.py @@ -32,6 +32,7 @@ def lint_releasenote(ctx): if not github.contains_release_note(pr_id): print( "Error: No releasenote was found for this PR. Please add one using 'reno'" + ", see https://github.com/DataDog/datadog-agent/blob/main/docs/dev/contributing.md#reno" ", or apply the label 'changelog/no-changelog' to the PR." ) raise Exit(code=1) diff --git a/tasks/system_probe.py b/tasks/system_probe.py index d26331b0b8b44f..362be486cfaf79 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -29,8 +29,6 @@ get_common_test_args, get_gobin, get_version_numeric_only, - set_co_re_env, - set_runtime_comp_env, ) from tasks.libs.ninja_syntax import NinjaWriter from tasks.windows_resources import MESSAGESTRINGS_MC_PATH, arch_to_windres_target @@ -628,9 +626,6 @@ def test( packages=TEST_PACKAGES, bundle_ebpf=False, output_path=None, - runtime_compiled=False, - co_re=False, - skip_linters=False, skip_object_files=False, run=None, failfast=False, @@ -650,10 +645,6 @@ def test( "preserve your environment", ) - if not skip_linters and not is_windows: - clang_format(ctx) - clang_tidy(ctx) - if not skip_object_files: build_object_files( ctx, @@ -675,10 +666,6 @@ def test( _, _, env = get_build_flags(ctx) env["DD_SYSTEM_PROBE_BPF_DIR"] = EMBEDDED_SHARE_DIR - if runtime_compiled: - set_runtime_comp_env(env) - elif co_re: - set_co_re_env(env) go_root = os.getenv("GOROOT") if go_root: @@ -715,8 +702,6 @@ def test_debug( package, run, bundle_ebpf=False, - runtime_compiled=False, - co_re=False, skip_object_files=False, failfast=False, kernel_release=None, @@ -753,10 +738,6 @@ def test_debug( _, _, env = get_build_flags(ctx) env["DD_SYSTEM_PROBE_BPF_DIR"] = EMBEDDED_SHARE_DIR - if runtime_compiled: - set_runtime_comp_env(env) - elif co_re: - set_co_re_env(env) cmd = '{sudo}{dlv} test {dir} --build-flags="-mod=mod -v {failfast} -tags={build_tags}" -- -test.run {run}' ctx.run(cmd.format(**args), env=env, pty=True, warn=True) @@ -866,7 +847,7 @@ def kitchen_prepare(ctx, kernel_release=None, ci=False, packages=""): # test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/ebpf/testsuite # test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/ebpf/bytecode/testsuite for i, pkg in enumerate(target_packages): - target_path = os.path.join(KITCHEN_ARTIFACT_DIR, re.sub("^.*datadog-agent.", "", pkg)) + target_path = os.path.join(KITCHEN_ARTIFACT_DIR, pkg.lstrip(os.getcwd())) target_bin = "testsuite" if is_windows: target_bin = "testsuite.exe" @@ -875,7 +856,6 @@ def kitchen_prepare(ctx, 
kernel_release=None, ci=False, packages=""): ctx, packages=pkg, skip_object_files=(i != 0), - skip_linters=True, bundle_ebpf=False, output_path=os.path.join(target_path, target_bin), kernel_release=kernel_release, @@ -1040,7 +1020,21 @@ def clang_format(ctx, targets=None, fix=False, fail_on_issue=False): targets = get_ebpf_targets() # remove externally maintained files - ignored_files = ["pkg/ebpf/c/bpf_helpers.h", "pkg/ebpf/c/bpf_endian.h", "pkg/ebpf/compiler/clang-stdarg.h"] + ignored_files = [ + "pkg/ebpf/c/bpf_builtins.h", + "pkg/ebpf/c/bpf_core_read.h", + "pkg/ebpf/c/bpf_cross_compile.h", + "pkg/ebpf/c/bpf_endian.h", + "pkg/ebpf/c/bpf_helpers.h", + "pkg/ebpf/c/bpf_helper_defs.h", + "pkg/ebpf/c/bpf_tracing.h", + "pkg/ebpf/c/bpf_tracing_custom.h", + "pkg/ebpf/c/compiler.h", + "pkg/ebpf/c/map-defs.h", + "pkg/ebpf/c/vmlinux_5_15_0.h", + "pkg/ebpf/c/vmlinux_5_15_0_arm.h", + "pkg/ebpf/compiler/clang-stdarg.h", + ] for f in ignored_files: if f in targets: targets.remove(f) @@ -1078,15 +1072,28 @@ def clang_tidy(ctx, fix=False, fail_on_issue=False, kernel_release=None): network_flags.append(f"-I{network_c_dir}") network_flags.append(f"-I{os.path.join(network_c_dir, 'prebuilt')}") network_flags.append(f"-I{os.path.join(network_c_dir, 'runtime')}") - run_tidy(ctx, files=network_files, build_flags=network_flags, fix=fix, fail_on_issue=fail_on_issue) + network_checks = [ + "-readability-function-cognitive-complexity", + "-readability-isolate-declaration", + "-clang-analyzer-security.insecureAPI.bcmp", + ] + run_tidy( + ctx, + files=network_files, + build_flags=network_flags, + fix=fix, + fail_on_issue=fail_on_issue, + checks=network_checks, + ) security_agent_c_dir = os.path.join(".", "pkg", "security", "ebpf", "c") security_files = list(base_files) security_files.extend(glob.glob(f"{security_agent_c_dir}/**/*.c")) security_flags = list(build_flags) security_flags.append(f"-I{security_agent_c_dir}") + security_flags.append(f"-I{security_agent_c_dir}/include") security_flags.append("-DUSE_SYSCALL_WRAPPER=0") - security_checks = ["-readability-function-cognitive-complexity"] + security_checks = ["-readability-function-cognitive-complexity", "-readability-isolate-declaration"] run_tidy( ctx, files=security_files, diff --git a/test/kitchen/test-definitions/windows-install-test.yml b/test/kitchen/test-definitions/windows-install-test.yml index 4c412e38250bfd..881326fb6d732b 100644 --- a/test/kitchen/test-definitions/windows-install-test.yml +++ b/test/kitchen/test-definitions/windows-install-test.yml @@ -1,63 +1,4 @@ suites: - - name: win-repair - run_list: - - "recipe[dd-agent-install::_install_windows_base]" - - "recipe[dd-agent-install::_stop_windows_agent]" - - "recipe[dd-agent-install::_damage_windows_install]" - - "recipe[dd-agent-install::_repair_windows_install]" - attributes: - datadog: - <% dd_agent_config.each do |key, value| %> - <%= key %>: "<%= value %>" - <% end %> - dd-agent-install: - <% if ENV['AGENT_VERSION'] %> - windows_version: "<%= ENV['AGENT_VERSION'] %>" - <% end %> - windows_agent_url: <%= windows_agent_url %> - <% if ENV['WINDOWS_AGENT_FILE'] %> - windows_agent_filename: "<%= ENV['WINDOWS_AGENT_FILE'] %>" - <% end %> - agent_install_options: > - APIKEY=<%= api_key %> - dd-agent-rspec: - skip_windows_signing_test: &skip_windows_signing_test <%= ENV['SKIP_SIGNATURE_TEST'] || false %> - cws_included: &cws_included <%= ENV['WINDOWS_DDPROCMON_DRIVER'] || false %> - - - name: win-upgrade-rollback - run_list: - - "recipe[dd-agent-system-files-check::list-files-before-install]" - - 
"recipe[dd-agent-install]" - - "recipe[dd-agent-upgrade]" - attributes: - datadog: - agent_version: <%= ENV['LAST_STABLE_VERSION'] %> - api_key: <%= api_key %> - dd-agent-upgrade: - <% dd_agent_config.each do |key, value| %> - <%= key %>: "<%= value %>" - <% end %> - <% if ENV['AGENT_VERSION'] %> - windows_version: "<%= ENV['AGENT_VERSION'] %>" - <% end %> - <% if ENV['WINDOWS_AGENT_FILE'] %> - windows_agent_filename: "<%= ENV['WINDOWS_AGENT_FILE'] %>" - <% end %> - agent_install_options: > - APIKEY=<%= api_key %> - WIXFAILWHENDEFERRED=1 - dd-agent-upgrade-rspec: - # Used by the rspec test to know the version to which the agent should be upgraded - agent_expected_version: "<%= ENV['LAST_STABLE_VERSION'] %>" - - ## for the rollback test, always skip the signature test. Whatever the previous version - ## is would have been tested for signature at the time it was released. And checking - ## for signature here gets us caught inbetween if the test is run across certificate - ## renewal boundary (the old binary signed with old cert, the new with new) - skip_windows_signing_test: &skip_windows_signing_test true - dd-agent-rspec: - skip_windows_signing_test: &skip_windows_signing_test true - - name: win-installopts run_list: - "recipe[dd-agent-system-files-check::list-files-before-install]" diff --git a/test/new-e2e/examples/dockerenv_test.go b/test/new-e2e/examples/dockerenv_test.go new file mode 100644 index 00000000000000..22913d26f42516 --- /dev/null +++ b/test/new-e2e/examples/dockerenv_test.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package examples + +import ( + "fmt" + "regexp" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awsdocker "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/docker" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" +) + +type dockerSuite struct { + e2e.BaseSuite[environments.DockerHost] +} + +func TestDocker(t *testing.T) { + e2e.Run(t, &dockerSuite{}, e2e.WithProvisioner(awsdocker.Provisioner(awsdocker.WithoutFakeIntake()))) +} + +func (v *dockerSuite) TestExecuteCommand() { + agentVersion := v.Env().Agent.Client.Version() + regexpVersion := regexp.MustCompile(`.*Agent .* - Commit: .* - Serialization version: .* - Go version: .*`) + + v.Require().Truef(regexpVersion.MatchString(agentVersion), fmt.Sprintf("%v doesn't match %v", agentVersion, regexpVersion)) + // args is used to test client.WithArgs. The values of the arguments are not relevant. 
+ args := agentclient.WithArgs([]string{"-n", "-c", "."}) + version := v.Env().Agent.Client.Version(args) + + v.Require().Truef(regexpVersion.MatchString(version), fmt.Sprintf("%v doesn't match %v", version, regexpVersion)) +} diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 3ccddf436fce14..c91f02898669e4 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -27,7 +27,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240201160319-9f1c04270e18 + github.com/DataDog/test-infra-definitions v0.0.0-20240207172919-6ba25fcf6a61 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.25.10 github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.1 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index ba070efc386f2e..57c0f18eb57f1a 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -12,8 +12,8 @@ github.com/DataDog/datadog-api-client-go/v2 v2.19.0 h1:Wvz/63/q39EpVwSH1T8jVyRvP github.com/DataDog/datadog-api-client-go/v2 v2.19.0/go.mod h1:oD5Lx8Li3oPRa/BSBenkn4i48z+91gwYORF/+6ph71g= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240201160319-9f1c04270e18 h1:cq5x6aiQWafSgiGDIZheEfQFVeQ397UzkJ19gGEzvc8= -github.com/DataDog/test-infra-definitions v0.0.0-20240201160319-9f1c04270e18/go.mod h1:Mcl9idboPONlGfuPsiNHycNiyXVJNQKi/Q+ZOXczzYc= +github.com/DataDog/test-infra-definitions v0.0.0-20240207172919-6ba25fcf6a61 h1:K5V7CA5fxxLL1UKzrqemwGDprjh1t65bppDJPgZ7I4E= +github.com/DataDog/test-infra-definitions v0.0.0-20240207172919-6ba25fcf6a61/go.mod h1:Mcl9idboPONlGfuPsiNHycNiyXVJNQKi/Q+ZOXczzYc= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= diff --git a/test/new-e2e/pkg/environments/aws/docker/host.go b/test/new-e2e/pkg/environments/aws/docker/host.go index 8e39c8fe8b7a97..a5233aae359305 100644 --- a/test/new-e2e/pkg/environments/aws/docker/host.go +++ b/test/new-e2e/pkg/environments/aws/docker/host.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" + "github.com/DataDog/test-infra-definitions/common/utils" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/dockeragentparams" "github.com/DataDog/test-infra-definitions/components/docker" @@ -141,7 +142,12 @@ func Run(ctx *pulumi.Context, env *environments.DockerHost, params *ProvisionerP return err } - manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, host, true) + installEcrCredsHelperCmd, err := ec2.InstallECRCredentialsHelper(awsEnv, host) + if err != nil { + return err + } + + manager, _, err := docker.NewManager(*awsEnv.CommonEnvironment, host, true, utils.PulumiDependsOn(installEcrCredsHelperCmd)) if err != nil { return err } diff --git a/test/new-e2e/pkg/runner/configmap.go b/test/new-e2e/pkg/runner/configmap.go index 
d78f7bfd618b15..0334186095f7e5 100644 --- a/test/new-e2e/pkg/runner/configmap.go +++ b/test/new-e2e/pkg/runner/configmap.go @@ -23,6 +23,8 @@ const ( AgentAPPKey = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentAPPKeyParamName // AgentPipelineID pulumi config parameter name AgentPipelineID = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentPipelineID + // AgentCommitSHA pulumi config parameter name + AgentCommitSHA = commonconfig.DDAgentConfigNamespace + ":" + commonconfig.DDAgentCommitSHA // InfraEnvironmentVariables pulumi config paramater name InfraEnvironmentVariables = commonconfig.DDInfraConfigNamespace + ":" + commonconfig.DDInfraEnvironment @@ -117,6 +119,11 @@ func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, if err != nil { return nil, err } + err = SetConfigMapFromParameter(profile.ParamStore(), cm, parameters.CommitSHA, AgentCommitSHA) + if err != nil { + return nil, err + } + // Secret parameters from profile store err = SetConfigMapFromSecret(profile.SecretStore(), cm, parameters.APIKey, AgentAPIKey) if err != nil { diff --git a/test/new-e2e/pkg/runner/configmap_test.go b/test/new-e2e/pkg/runner/configmap_test.go index df1979fa439bb3..ea2ba6fbd1c4ca 100644 --- a/test/new-e2e/pkg/runner/configmap_test.go +++ b/test/new-e2e/pkg/runner/configmap_test.go @@ -39,5 +39,6 @@ func Test_BuildStackParameters(t *testing.T) { "ddinfra:aws/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, "ddinfra:aws/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, "ddagent:pipeline_id": auto.ConfigValue{Value: "pipeline_id", Secret: false}, + "ddagent:commit_sha": auto.ConfigValue{Value: "commit_sha", Secret: false}, }, configMap) } diff --git a/test/new-e2e/pkg/runner/parameters/const.go b/test/new-e2e/pkg/runner/parameters/const.go index 65415dab30ad18..2f5e4691a7fbe4 100644 --- a/test/new-e2e/pkg/runner/parameters/const.go +++ b/test/new-e2e/pkg/runner/parameters/const.go @@ -35,6 +35,8 @@ const ( StackParameters StoreKey = "stack_params" // PipelineID config file parameter name PipelineID StoreKey = "pipeline_id" + // CommitSHA config file parameter name + CommitSHA StoreKey = "commit_sha" // VerifyCodeSignature config file parameter name VerifyCodeSignature StoreKey = "verify_code_signature" // OutputDir config file parameter name diff --git a/test/new-e2e/tests/apm/docker_test.go b/test/new-e2e/tests/apm/docker_test.go index 9575f3770963ca..8aeec02d87e60f 100644 --- a/test/new-e2e/tests/apm/docker_test.go +++ b/test/new-e2e/tests/apm/docker_test.go @@ -66,11 +66,6 @@ func (s *DockerFakeintakeSuite) TestTraceAgentMetrics() { } func (s *DockerFakeintakeSuite) TestTracesHaveContainerTag() { - if s.transport != uds { - // TODO: Container tagging with cgroup v2 currently only works over UDS - // We should update this to run over TCP as well once that is implemented. 
- s.T().Skip("Container Tagging with Cgroup v2 only works on UDS") - } err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() s.Require().NoError(err) diff --git a/test/new-e2e/tests/apm/tests.go b/test/new-e2e/tests/apm/tests.go index 54401b867ca1da..fd45f83eaf4a0a 100644 --- a/test/new-e2e/tests/apm/tests.go +++ b/test/new-e2e/tests/apm/tests.go @@ -51,6 +51,7 @@ func testBasicTraces(c *assert.CollectT, service string, intake *components.Fake } func testStatsForService(t *testing.T, c *assert.CollectT, service string, intake *components.FakeIntake) { + t.Helper() stats, err := intake.Client().GetAPMStats() assert.NoError(c, err) assert.NotEmpty(c, stats) @@ -59,6 +60,7 @@ func testStatsForService(t *testing.T, c *assert.CollectT, service string, intak } func testTracesHaveContainerTag(t *testing.T, c *assert.CollectT, service string, intake *components.FakeIntake) { + t.Helper() traces, err := intake.Client().GetTraces() assert.NoError(c, err) assert.NotEmpty(c, traces) @@ -94,6 +96,7 @@ func hasContainerTag(payloads []*aggregator.TracePayload, tag string) bool { } func testTraceAgentMetrics(t *testing.T, c *assert.CollectT, intake *components.FakeIntake) { + t.Helper() expected := map[string]struct{}{ // "datadog.trace_agent.started": {}, // FIXME: this metric is flaky "datadog.trace_agent.heartbeat": {}, diff --git a/test/new-e2e/tests/apm/vm_test.go b/test/new-e2e/tests/apm/vm_test.go index 3ebdc1fe0d6d8a..5b677b9e8e875a 100644 --- a/test/new-e2e/tests/apm/vm_test.go +++ b/test/new-e2e/tests/apm/vm_test.go @@ -40,20 +40,6 @@ func NewVMFakeintakeSuite(tr transport) *VMFakeintakeSuite { } func vmSuiteOpts(tr transport, opts ...awshost.ProvisionerOption) []e2e.SuiteOption { - opts = append(opts, awshost.WithDocker()) - options := []e2e.SuiteOption{ - e2e.WithProvisioner(awshost.Provisioner(opts...)), - e2e.WithStackName(fmt.Sprintf("apm-vm-suite-%s-%v", tr, os.Getenv("CI_PIPELINE_ID"))), - } - return options -} - -// TestVMFakeintakeSuiteUDS runs basic Trace Agent tests over the UDS transport -func TestVMFakeintakeSuiteUDS(t *testing.T) { - cfg := ` -apm_config.enabled: true -apm_config.receiver_socket: /var/run/datadog/apm.socket -` setupScript := `#!/bin/bash # /var/run/datadog directory is necessary for UDS socket creation sudo mkdir -p /var/run/datadog @@ -66,13 +52,29 @@ sudo chown dd-agent:dd-agent /var/run/datadog sudo groupadd -f -r docker sudo usermod -a -G docker dd-agent ` - - options := vmSuiteOpts(uds, + opts = append(opts, + awshost.WithDocker(), // Create the /var/run/datadog directory and ensure // permissions are correct so the agent can create - // unix sockets for the UDS transport + // unix sockets for the UDS transport and communicate with the docker socket. awshost.WithEC2InstanceOptions(ec2.WithUserData(setupScript)), + ) + options := []e2e.SuiteOption{ + e2e.WithProvisioner(awshost.Provisioner(opts...)), + e2e.WithStackName(fmt.Sprintf("apm-vm-suite-%s-%v", tr, os.Getenv("CI_PIPELINE_ID"))), + } + return options +} + +// TestVMFakeintakeSuiteUDS runs basic Trace Agent tests over the UDS transport +func TestVMFakeintakeSuiteUDS(t *testing.T) { + cfg := ` +apm_config.enabled: true +apm_config.receiver_socket: /var/run/datadog/apm.socket +` + + options := vmSuiteOpts(uds, // Enable the UDS receiver in the trace-agent awshost.WithAgentOptions(agentparams.WithAgentConfig(cfg))) e2e.Run(t, NewVMFakeintakeSuite(uds), options...) 
@@ -109,12 +111,6 @@ func (s *VMFakeintakeSuite) TestTraceAgentMetrics() { } func (s *VMFakeintakeSuite) TestTracesHaveContainerTag() { - if s.transport != uds { - // TODO: Container tagging with cgroup v2 currently only works over UDS - // We should update this to run over TCP as well once that is implemented. - s.T().Skip("Container Tagging with Cgroup v2 only works on UDS") - } - err := s.Env().FakeIntake.Client().FlushServerAndResetAggregators() s.Require().NoError(err) diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 4c73b8847a3424..0ec0210c28d298 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -197,6 +197,8 @@ func (suite *ecsSuite) TestNginxECS() { `^image_name:ghcr.io/datadog/apps-nginx-server$`, `^image_tag:main$`, `^nginx_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, + `^nginx_host:`, + `^port:80$`, `^short_image:apps-nginx-server$`, `^task_arn:`, `^task_family:.*-nginx-ec2$`, diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index 2187677f6c4279..c6f573a4a17554 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -261,8 +261,10 @@ func (suite *k8sSuite) TestNginx() { `^kube_qos:Burstable$`, `^kube_replica_set:nginx-[[:alnum:]]+$`, `^kube_service:nginx$`, + `^nginx_host:`, `^pod_name:nginx-[[:alnum:]]+-[[:alnum:]]+$`, `^pod_phase:running$`, + `^port:80$`, `^short_image:apps-nginx-server$`, }, }, diff --git a/test/new-e2e/tests/cws/fargate_test.go b/test/new-e2e/tests/cws/fargate_test.go index fe1b68e0aa1be8..e1ff0ee6e789e7 100644 --- a/test/new-e2e/tests/cws/fargate_test.go +++ b/test/new-e2e/tests/cws/fargate_test.go @@ -280,6 +280,11 @@ func getAgentFullImagePath(e *configCommon.CommonEnvironment) string { if fullImagePath := e.AgentFullImagePath(); fullImagePath != "" { return fullImagePath } + + if e.PipelineID() != "" && e.CommitSHA() != "" { + return fmt.Sprintf("669783387624.dkr.ecr.us-east-1.amazonaws.com/agent:%s-%s", e.PipelineID(), e.CommitSHA()) + } + return agentDefaultImagePath } diff --git a/test/new-e2e/tests/remote-config/rc_ssl_config_test.go b/test/new-e2e/tests/remote-config/rc_ssl_config_test.go index 9975cccec4daff..ec262c9adbf8f6 100644 --- a/test/new-e2e/tests/remote-config/rc_ssl_config_test.go +++ b/test/new-e2e/tests/remote-config/rc_ssl_config_test.go @@ -39,8 +39,7 @@ func TestSslConfigSuite(t *testing.T) { // TestRemoteConfigSSLConfigMismatch tests the startup condition where the agent's SSL config is disabled but RC's TLS validation is not explicitly disabled func (s *sslConfigSuite) TestRemoteConfigSSLConfigMismatch() { // Ensure the remote config service starts - // TODO uncomment the following line in https://github.com/DataDog/datadog-agent/pull/22582 (once fx lifecycle startup logging is added) - //assertLogsEventually(a.T(), a.Env().RemoteHost, "agent", "remote config service started", 2*time.Minute, 5*time.Second) + assertLogsEventually(s.T(), s.Env().RemoteHost, "agent", "remote config service started", 2*time.Minute, 5*time.Second) // Ensure the agent logs a warning about the SSL config mismatch assertLogsEventually(s.T(), s.Env().RemoteHost, "agent", "remote Configuration does not allow skipping TLS validation by default", 2*time.Minute, 5*time.Second) diff --git a/test/new-e2e/tests/remote-config/tracer_test.go b/test/new-e2e/tests/remote-config/tracer_test.go index 2bf64126094f83..cd207099872a0e 100644 --- 
a/test/new-e2e/tests/remote-config/tracer_test.go +++ b/test/new-e2e/tests/remote-config/tracer_test.go @@ -43,8 +43,7 @@ func TestRcTracerSuite(t *testing.T) { // TestRemoteConfigTracerUpdate tests the remote-config service by attempting to retrieve RC payloads as if a tracer were calling it func (s *tracerSuite) TestRemoteConfigTracerUpdate() { // Ensure the remote config service starts - // TODO uncomment the following line in https://github.com/DataDog/datadog-agent/pull/22582 (once fx lifecycle startup logging is added) - //assertLogsEventually(a.T(), a.Env().RemoteHost, "agent", "remote config service started", 2*time.Minute, 5*time.Second) + assertLogsEventually(s.T(), s.Env().RemoteHost, "agent", "remote config service started", 2*time.Minute, 5*time.Second) // Wait until we've started querying for configs assertLogsEventually(s.T(), s.Env().RemoteHost, "agent", "/api/v0.1/configurations", 2*time.Minute, 5*time.Second) diff --git a/test/new-e2e/tests/windows/agent/agent.go b/test/new-e2e/tests/windows/agent/agent.go index 6cfeadd38e7720..427b268f0b73e4 100644 --- a/test/new-e2e/tests/windows/agent/agent.go +++ b/test/new-e2e/tests/windows/agent/agent.go @@ -34,6 +34,15 @@ func GetDatadogAgentProductCode(host *components.RemoteHost) (string, error) { return windows.GetProductCodeByName(host, "Datadog Agent") } +// RepairAllAgent repairs the Datadog Agent installation, forcing all files to be reinstalled (msiexec /fa) +func RepairAllAgent(host *components.RemoteHost, args string, logPath string) error { + product, err := GetDatadogAgentProductCode(host) + if err != nil { + return err + } + return windows.RepairAllMSI(host, product, args, logPath) +} + // UninstallAgent uninstalls the Datadog Agent func UninstallAgent(host *components.RemoteHost, logPath string) error { product, err := GetDatadogAgentProductCode(host) diff --git a/test/new-e2e/tests/windows/agent/package.go b/test/new-e2e/tests/windows/agent/package.go index 40722ec3e5e459..62d6a92e46bdf2 100644 --- a/test/new-e2e/tests/windows/agent/package.go +++ b/test/new-e2e/tests/windows/agent/package.go @@ -12,6 +12,8 @@ import ( "os" "strings" + "github.com/DataDog/datadog-agent/pkg/version" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/agent/installers/v2" "github.com/aws/aws-sdk-go-v2/aws" awsConfig "github.com/aws/aws-sdk-go-v2/config" @@ -44,9 +46,11 @@ type Package struct { URL string } -// AgentVersion returns the Package version without the -1 suffix, which should match the Agent version. +// AgentVersion returns a string containing the version number and pre-release component only, e.g.
`0.0.0-beta.1` func (p *Package) AgentVersion() string { - return strings.TrimSuffix(p.Version, "-1") + // Trim the package suffix and parse the remaining version info + ver, _ := version.New(strings.TrimSuffix(p.Version, "-1"), "") + return ver.GetNumberAndPre() } // GetBetaMSIURL returns the URL for the beta agent MSI diff --git a/test/new-e2e/tests/windows/install-test/install_test.go b/test/new-e2e/tests/windows/install-test/install_test.go index 270beb07c203bf..e7dab0dfbd1df6 100644 --- a/test/new-e2e/tests/windows/install-test/install_test.go +++ b/test/new-e2e/tests/windows/install-test/install_test.go @@ -9,9 +9,11 @@ package installtest import ( "flag" "fmt" + "os" "path/filepath" "strings" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" @@ -20,7 +22,7 @@ import ( windows "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/agent" - "github.com/DataDog/test-infra-definitions/components/os" + componentos "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" "testing" @@ -37,7 +39,7 @@ type agentMSISuite struct { func TestMSI(t *testing.T) { opts := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoAgentNoFakeIntake( - awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), + awshost.WithEC2InstanceOptions(ec2.WithOS(componentos.WindowsDefault)), ))} if *devMode { opts = append(opts, e2e.WithDevMode()) @@ -58,7 +60,12 @@ func TestMSI(t *testing.T) { if agentPackage.PipelineID == "" && agentPackage.Channel != "" { stackNameChannelPart = fmt.Sprintf("-%s", agentPackage.Channel) } - opts = append(opts, e2e.WithStackName(fmt.Sprintf("windows-msi-test-v%s-%s%s", majorVersion, agentPackage.Arch, stackNameChannelPart))) + stackNameCIJobPart := "" + ciJobID := os.Getenv("CI_JOB_ID") + if ciJobID != "" { + stackNameCIJobPart = fmt.Sprintf("-%s", os.Getenv("CI_JOB_ID")) + } + opts = append(opts, e2e.WithStackName(fmt.Sprintf("windows-msi-test-v%s-%s%s%s", majorVersion, agentPackage.Arch, stackNameChannelPart, stackNameCIJobPart))) s := &agentMSISuite{ agentPackage: agentPackage, @@ -92,13 +99,12 @@ func (is *agentMSISuite) TestInstall() { vm := is.Env().RemoteHost is.prepareHost() - t, err := NewTester(is.T(), vm, WithExpectedAgentVersion(is.agentPackage.AgentVersion())) - is.Require().NoError(err, "should create tester") + t := is.installAgent(vm, "", filepath.Join(outputDir, "install.log")) - if !t.TestInstallAgentPackage(is.T(), is.agentPackage, "", filepath.Join(outputDir, "install.log")) { - is.T().Fatal("failed to install agent") + if !t.TestExpectations(is.T()) { + is.T().FailNow() } - t.TestRuntimeExpectations(is.T()) + t.TestUninstall(is.T(), filepath.Join(outputDir, "uninstall.log")) } @@ -110,40 +116,131 @@ func (is *agentMSISuite) TestUpgrade() { vm := is.Env().RemoteHost is.prepareHost() - t, err := NewTester(is.T(), vm, WithExpectedAgentVersion(is.agentPackage.AgentVersion())) + _ = is.installLastStable(vm, "", filepath.Join(outputDir, "install.log")) + + t, err := NewTester(is.T(), vm, + WithAgentPackage(is.agentPackage), + ) is.Require().NoError(err, "should create tester") - // install old agent - _ = is.installLastStable(t, filepath.Join(outputDir, "install.log")) + if !is.Run(fmt.Sprintf("upgrade 
to %s", t.agentPackage.AgentVersion()), func() { + err = t.InstallAgent(is.T(), "", filepath.Join(outputDir, "upgrade.log")) + is.Require().NoError(err, "should upgrade to agent %s", t.agentPackage.AgentVersion()) + }) { + is.T().FailNow() + } - // upgrade to new agent - if !t.TestInstallAgentPackage(is.T(), is.agentPackage, "", filepath.Join(outputDir, "upgrade.log")) { - is.T().Fatal("failed to upgrade agent") + if !t.TestExpectations(is.T()) { + is.T().FailNow() } - t.TestRuntimeExpectations(is.T()) t.TestUninstall(is.T(), filepath.Join(outputDir, "uninstall.log")) } -// This is separate from TestInstallAgentPackage because previous versions of the agent -// may not conform to the latest test expectations. -func (is *agentMSISuite) installLastStable(t *Tester, logfile string) *windowsAgent.Package { - var agentPackage *windowsAgent.Package +// TC-INS-002 +func (is *agentMSISuite) TestUpgradeRollback() { + outputDir, err := runner.GetTestOutputDir(runner.GetProfile(), is.T()) + is.Require().NoError(err, "should get output dir") + is.T().Logf("Output dir: %s", outputDir) - if !is.Run("install prev stable agent", func() { - var err error + vm := is.Env().RemoteHost + is.prepareHost() - agentPackage, err = windowsAgent.GetLastStablePackageFromEnv() - is.Require().NoError(err, "should get last stable agent package from env") + previousTester := is.installLastStable(vm, "", filepath.Join(outputDir, "install.log")) - t.InstallAgentPackage(is.T(), agentPackage, "", logfile) + t, err := NewTester(is.T(), vm, + WithAgentPackage(is.agentPackage), + ) + is.Require().NoError(err, "should create tester") - agentVersion, err := t.InstallTestClient.GetAgentVersion() - is.Require().NoError(err, "should get agent version") - windowsAgent.TestAgentVersion(is.T(), agentPackage.AgentVersion(), agentVersion) + if !is.Run(fmt.Sprintf("upgrade to %s with rollback", t.agentPackage.AgentVersion()), func() { + err = t.InstallAgent(is.T(), "WIXFAILWHENDEFERRED=1", filepath.Join(outputDir, "upgrade.log")) + is.Require().Error(err, "should fail to install agent %s", t.agentPackage.AgentVersion()) }) { - is.T().Fatal("failed to install last stable agent") + is.T().FailNow() + } + + // TODO: we shouldn't have to start the agent manually after rollback + // but the kitchen tests did too. 
+ err = windows.StartService(t.host, "DatadogAgent") + is.Require().NoError(err, "agent service should start after rollback") + + if !previousTester.TestExpectations(is.T()) { + is.T().FailNow() + } + + previousTester.TestUninstall(is.T(), filepath.Join(outputDir, "uninstall.log")) +} + +// TC-INS-001 +func (is *agentMSISuite) TestRepair() { + outputDir, err := runner.GetTestOutputDir(runner.GetProfile(), is.T()) + is.Require().NoError(err, "should get output dir") + is.T().Logf("Output dir: %s", outputDir) + + vm := is.Env().RemoteHost + is.prepareHost() + + t := is.installAgent(vm, "", filepath.Join(outputDir, "install.log")) + + err = windows.StopService(t.host, "DatadogAgent") + is.Require().NoError(err) + + // Corrupt the install + err = t.host.Remove("C:\\Program Files\\Datadog\\Datadog Agent\\bin\\agent.exe") + is.Require().NoError(err) + err = t.host.RemoveAll("C:\\Program Files\\Datadog\\Datadog Agent\\embedded3") + is.Require().NoError(err) + + if !is.Run("repair install", func() { + err = windowsAgent.RepairAllAgent(t.host, "", filepath.Join(outputDir, "repair.log")) + is.Require().NoError(err) + }) { + is.T().FailNow() + } + + if !t.TestExpectations(is.T()) { + is.T().FailNow() + } + + t.TestUninstall(is.T(), filepath.Join(outputDir, "uninstall.log")) +} + +func (is *agentMSISuite) installAgentPackage(vm *components.RemoteHost, agentPackage *windowsAgent.Package, args string, logfile string, testerOpts ...TesterOption) *Tester { + opts := []TesterOption{ + WithAgentPackage(agentPackage), + } + opts = append(opts, testerOpts...) + t, err := NewTester(is.T(), vm, opts...) + is.Require().NoError(err, "should create tester") + + if !is.Run(fmt.Sprintf("install %s", t.agentPackage.AgentVersion()), func() { + err = t.InstallAgent(is.T(), args, logfile) + is.Require().NoError(err, "should install agent %s", t.agentPackage.AgentVersion()) + }) { + is.T().FailNow() + } + + return t +} + +// installAgent installs the agent package on the VM and returns the Tester +func (is *agentMSISuite) installAgent(vm *components.RemoteHost, args string, logfile string, testerOpts ...TesterOption) *Tester { + return is.installAgentPackage(vm, is.agentPackage, args, logfile, testerOpts...) 
+} + +// installLastStable installs the last stable agent package on the VM, runs tests, and returns the Tester +func (is *agentMSISuite) installLastStable(vm *components.RemoteHost, args string, logfile string) *Tester { + previousAgentPackage, err := windowsAgent.GetLastStablePackageFromEnv() + is.Require().NoError(err, "should get last stable agent package from env") + t := is.installAgentPackage(vm, previousAgentPackage, args, logfile, + WithPreviousVersion(), + ) + + // Ensure the agent is functioning properly to provide a proper foundation for the test + if !t.TestExpectations(is.T()) { + is.T().FailNow() } - return agentPackage + return t } diff --git a/test/new-e2e/tests/windows/install-test/installtester.go b/test/new-e2e/tests/windows/install-test/installtester.go index 1b4b700434f042..1df6e69e0fb08a 100644 --- a/test/new-e2e/tests/windows/install-test/installtester.go +++ b/test/new-e2e/tests/windows/install-test/installtester.go @@ -27,6 +27,12 @@ type Tester struct { host *components.RemoteHost InstallTestClient *common.TestClient + agentPackage *windowsAgent.Package + isPreviousVersion bool + + // Path to the MSI on the remote host, only available after install is run + remoteMSIPath string + expectedAgentVersion string expectedAgentMajorVersion string @@ -53,6 +59,20 @@ func NewTester(tt *testing.T, host *components.RemoteHost, opts ...TesterOption) t.beforeInstallSystemDirListPath = `C:\system-files-before-install.log` t.afterUninstallSystemDirListPath = `C:\system-files-after-uninstall.log` + // If the system file snapshot doesn't exist, create it + snapshotExists, err := t.host.FileExists(t.beforeInstallSystemDirListPath) + if err != nil { + return nil, err + } + if !snapshotExists { + if !tt.Run("snapshot system files", func(tt *testing.T) { + err = t.snapshotSystemfiles(tt, t.beforeInstallSystemDirListPath) + require.NoError(tt, err) + }) { + tt.FailNow() + } + } + for _, opt := range opts { opt(t) } @@ -61,14 +81,32 @@ func NewTester(tt *testing.T, host *components.RemoteHost, opts ...TesterOption) return nil, fmt.Errorf("expectedAgentVersion is required") } + // Ensure the expected version is well formed + if !tt.Run("validate input params", func(tt *testing.T) { + if !windowsAgent.TestAgentVersion(tt, t.expectedAgentVersion, t.expectedAgentVersion) { + tt.FailNow() + } + }) { + tt.FailNow() + } + return t, nil } -// WithExpectedAgentVersion sets the expected agent version to be installed -func WithExpectedAgentVersion(version string) TesterOption { +// WithAgentPackage sets the agent package to be installed +func WithAgentPackage(agentPackage *windowsAgent.Package) TesterOption { return func(t *Tester) { - t.expectedAgentVersion = version - t.expectedAgentMajorVersion = strings.Split(version, ".")[0] + t.agentPackage = agentPackage + t.expectedAgentVersion = agentPackage.AgentVersion() + t.expectedAgentMajorVersion = strings.Split(t.expectedAgentVersion, ".")[0] + } +} + +// WithPreviousVersion sets the Tester to expect a previous version of the agent to be installed +// and will not run all tests since expectations may have changed. 
+func WithPreviousVersion() TesterOption { + return func(t *Tester) { + t.isPreviousVersion = true } } @@ -213,6 +251,17 @@ func (t *Tester) snapshotSystemfiles(tt *testing.T, remotePath string) error { func (t *Tester) testDoesNotChangeSystemFiles(tt *testing.T) bool { return tt.Run("does not remove system files", func(tt *testing.T) { + tt.Cleanup(func() { + // Remove the snapshot files after the test + err := t.host.Remove(t.beforeInstallSystemDirListPath) + if err != nil { + tt.Logf("failed to remove %s: %s", t.beforeInstallSystemDirListPath, err) + } + err = t.host.Remove(t.afterUninstallSystemDirListPath) + if err != nil { + tt.Logf("failed to remove %s: %s", t.afterUninstallSystemDirListPath, err) + } + }) // Diff the two files on the remote host, selecting missing items cmd := fmt.Sprintf(`Compare-Object -ReferenceObject (Get-Content "%s") -DifferenceObject (Get-Content "%s") | Where-Object -Property SideIndicator -EQ '<=' | Select -ExpandProperty InputObject`, t.beforeInstallSystemDirListPath, t.afterUninstallSystemDirListPath) output, err := t.host.Execute(cmd) @@ -249,35 +298,6 @@ func (t *Tester) InstallAgentPackage(tt *testing.T, agentPackage *windowsAgent.P return remoteMSIPath, err } -// TestInstallAgentPackage installs the agent and runs tests -func (t *Tester) TestInstallAgentPackage(tt *testing.T, agentPackage *windowsAgent.Package, args string, logfile string) bool { - return tt.Run("install the agent", func(tt *testing.T) { - if !tt.Run("snapshot system files", func(tt *testing.T) { - err := t.snapshotSystemfiles(tt, t.beforeInstallSystemDirListPath) - require.NoError(tt, err) - }) { - tt.Fatal("snapshot system files failed") - } - - var remoteMSIPath string - var err error - if !tt.Run("install", func(tt *testing.T) { - remoteMSIPath, err = t.InstallAgentPackage(tt, agentPackage, args, logfile) - require.NoError(tt, err, "should install the agent") - }) { - tt.Fatal("install failed") - } - - installedVersion, err := t.InstallTestClient.GetAgentVersion() - require.NoError(tt, err, "should get agent version") - windowsAgent.TestAgentVersion(tt, t.expectedAgentVersion, installedVersion) - - windowsAgent.TestValidDatadogCodeSignatures(tt, t.host, []string{remoteMSIPath}) - common.CheckInstallation(tt, t.InstallTestClient) - t.testAgentCodeSignature(tt) - }) -} - // TestUninstall uninstalls the agent and runs tests func (t *Tester) TestUninstall(tt *testing.T, logfile string) bool { return tt.Run("uninstall the agent", func(tt *testing.T) { @@ -300,3 +320,47 @@ func (t *Tester) TestUninstall(tt *testing.T, logfile string) bool { t.testDoesNotChangeSystemFiles(tt) }) } + +func (t *Tester) testRunningExpectedVersion(tt *testing.T) bool { + return tt.Run("running expected version", func(tt *testing.T) { + installedVersion, err := t.InstallTestClient.GetAgentVersion() + require.NoError(tt, err, "should get agent version") + windowsAgent.TestAgentVersion(tt, t.agentPackage.AgentVersion(), installedVersion) + }) +} + +// InstallAgent installs the agent +func (t *Tester) InstallAgent(tt *testing.T, args string, logfile string) error { + var err error + t.remoteMSIPath, err = t.InstallAgentPackage(tt, t.agentPackage, args, logfile) + return err +} + +// Only do some basic checks on the agent since it's a previous version +func (t *Tester) testPreviousVersionExpectations(tt *testing.T) { + common.CheckAgentBehaviour(tt, t.InstallTestClient) +} + +// More in depth checks on current version +func (t *Tester) testCurrentVersionExpectations(tt *testing.T) { + if t.remoteMSIPath != "" { 
+ windowsAgent.TestValidDatadogCodeSignatures(tt, t.host, []string{t.remoteMSIPath}) + } + common.CheckInstallation(tt, t.InstallTestClient) + t.testAgentCodeSignature(tt) + t.TestRuntimeExpectations(tt) +} + +// TestExpectations tests the current agent installation meets the expectations provided to the Tester +func (t *Tester) TestExpectations(tt *testing.T) bool { + return tt.Run(fmt.Sprintf("test %s", t.agentPackage.AgentVersion()), func(tt *testing.T) { + if !t.testRunningExpectedVersion(tt) { + tt.FailNow() + } + if t.isPreviousVersion { + t.testPreviousVersionExpectations(tt) + } else { + t.testCurrentVersionExpectations(tt) + } + }) +} diff --git a/test/new-e2e/tests/windows/msi.go b/test/new-e2e/tests/windows/msi.go index 4641c900f75e82..b4aeeb20cc8e44 100644 --- a/test/new-e2e/tests/windows/msi.go +++ b/test/new-e2e/tests/windows/msi.go @@ -12,44 +12,57 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" ) -// InstallMSI installs an MSI on the VM with the provided args and collects the install log -func InstallMSI(host *components.RemoteHost, msiPath string, args string, logPath string) error { +// MsiExec runs msiexec on the VM with the provided operation and args and collects the log +// +// args may need to be escaped/quoted. The Start-Process ArgumentList parameter value is wrapped in single quotes. For example: +// - Start-Process -Wait msiexec -PassThru -ArgumentList '/qn /l "logfile" /i "msipath" APIKEY="00000000000000000000000000000000"' +// - https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.management/start-process?view=powershell-7.4#example-7-specifying-arguments-to-the-process +// - https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules?view=powershell-7.4 +func MsiExec(host *components.RemoteHost, operation string, product string, args string, logPath string) error { remoteLogPath, err := GetTemporaryFile(host) if err != nil { return err } - cmd := fmt.Sprintf(`Exit (Start-Process -Wait msiexec -PassThru -ArgumentList '/qn /l %s /i %s %s').ExitCode`, - remoteLogPath, msiPath, args) - - output, installErr := host.Execute(cmd) - // Collect the install log + args = fmt.Sprintf(`/qn /l "%s" %s "%s" %s`, remoteLogPath, operation, product, args) + cmd := fmt.Sprintf(`Exit (Start-Process -Wait msiexec -PassThru -ArgumentList '%s').ExitCode`, args) + _, msiExecErr := host.Execute(cmd) + // Always collect the log file, return error after err = host.GetFile(remoteLogPath, logPath) if err != nil { - fmt.Printf("failed to collect install log: %s\n", err) + fmt.Printf("failed to collect msiexec log: %s\n", err) } - if installErr != nil { - return fmt.Errorf("failed to install MSI: %w\n%s", installErr, output) + return msiExecErr +} + +// InstallMSI installs an MSI on the VM with the provided args and collects the install log +// +// args may need to be escaped/quoted, see MsiExec() for details +func InstallMSI(host *components.RemoteHost, msiPath string, args string, logPath string) error { + err := MsiExec(host, "/i", msiPath, args, logPath) + if err != nil { + return fmt.Errorf("failed to install MSI: %w", err) } return nil } // UninstallMSI uninstalls an MSI on the VM and collects the uninstall log func UninstallMSI(host *components.RemoteHost, msiPath string, logPath string) error { - remoteLogPath, err := GetTemporaryFile(host) + err := MsiExec(host, "/x", msiPath, "", logPath) if err != nil { - return err + return fmt.Errorf("failed to uninstall MSI: %w", err) } - cmd := 
fmt.Sprintf("Exit (start-process -passthru -wait msiexec.exe -argumentList /x,'%s',/qn,/l,%s).ExitCode", msiPath, remoteLogPath) + return nil +} - output, uninstallErr := host.Execute(cmd) - // Collect the install log - err = host.GetFile(remoteLogPath, logPath) +// RepairAllMSI repairs an MSI with /fa on the VM and collects the repair log +// +// /fa: a - forces all files to be reinstalled +// +// args may need to be escaped/quoted, see MsiExec() for details +func RepairAllMSI(host *components.RemoteHost, msiPath string, args string, logPath string) error { + err := MsiExec(host, "/fa", msiPath, args, logPath) if err != nil { - fmt.Printf("failed to collect uninstall log: %s\n", err) - } - - if uninstallErr != nil { - return fmt.Errorf("failed to uninstall MSI: %w\n%s", uninstallErr, output) + return fmt.Errorf("failed to repair MSI: %w", err) } return nil } diff --git a/tools/retry_file_dump/go.mod b/tools/retry_file_dump/go.mod index 5ad1ff1d77c76e..b045551f60cebf 100644 --- a/tools/retry_file_dump/go.mod +++ b/tools/retry_file_dump/go.mod @@ -5,7 +5,7 @@ module github.com/DataDog/datadog-agent/tools/retry_file_dump -go 1.20 +go 1.21 require github.com/golang/protobuf v1.4.3 diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/dialogs/ddlicense.wxi b/tools/windows/DatadogAgentInstaller/WixSetup/dialogs/ddlicense.wxi index dac522d2cefa8e..803dda3ec82ea0 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup/dialogs/ddlicense.wxi +++ b/tools/windows/DatadogAgentInstaller/WixSetup/dialogs/ddlicense.wxi @@ -6,9 +6,6 @@ - - 1 - !(wix.WixUICostingPopupOptOut) OR CostingComplete = 1 @@ -21,8 +18,5 @@ !(loc.LicenseAgreementSubHeader) - - - diff --git a/tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl b/tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl index b78093a29d4aab..bd44a9c1f7c5c7 100644 --- a/tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl +++ b/tools/windows/DatadogAgentInstaller/WixSetup/localization-en-us.wxl @@ -65,11 +65,11 @@ [ProductName] Setup - I &agree. + I &accept the terms in the license agreement. {\WixUI_Font_Title_White}Software Licensing - {\WixUI_Font_Normal_White}Please read the following license agreement carefully + {\WixUI_Font_Normal_White}Please read the provided license agreement carefully - The Datadog Agent]]> includes open source software and is packaged with a closed source Datadog device driver, which is required for the NPM and USM products. The driver is licensed under the following End User License Agreement. + Use and operation of this software is governed by the End User License Agreement (EULA) located at https://www.datadoghq.com/legal/eula/]]>. [ProductName] Setup E&xit